diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary index 1915a13d113..d185121e735 100644 --- a/.github/actions/dockerfiles/Dockerfile.alpine-binary +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -23,5 +23,18 @@ RUN case ${TARGETPLATFORM} in \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} alpine -COPY --from=builder /out/stacks-node /out/stacks-signer /bin/ -CMD ["stacks-node", "mainnet"] +COPY --from=builder /out/* /bin/ +ARG TAG + +RUN case "${TAG}" in \ + signer-*) \ + echo "/bin/stacks-signer run --config /signer-config.toml" > /tmp/command.sh \ + ;; \ + *) \ + echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ + rm /bin/blockstack-cli /bin/clarity-cli /bin/relay-server /bin/stacks-events /bin/stacks-inspect \ + ;; \ + esac && \ + chmod +x /tmp/command.sh + +CMD ["sh", "-c", "/tmp/command.sh"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary index 5432e923778..757379095c3 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-binary +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -23,5 +23,18 @@ RUN case ${TARGETPLATFORM} in \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=builder /out/stacks-node /out/stacks-signer /bin/ -CMD ["stacks-node", "mainnet"] +COPY --from=builder /out/* /bin/ +ARG TAG + +RUN case "${TAG}" in \ + signer-*) \ + echo "/bin/stacks-signer run --config /signer-config.toml" > /tmp/command.sh \ + ;; \ + *) \ + echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ + rm /bin/blockstack-cli /bin/clarity-cli /bin/relay-server /bin/stacks-events /bin/stacks-inspect \ + ;; \ + esac && \ + chmod +x /tmp/command.sh + +CMD ["sh", "-c", "/tmp/command.sh"] diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 76ca85b6463..e14934558a5 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -81,16 +81,41 @@ jobs: - tests::nakamoto_integrations::correct_burn_outs - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup - - tests::signer::stackerdb_dkg - - tests::signer::stackerdb_sign - - tests::signer::stackerdb_block_proposal - - tests::signer::stackerdb_filter_bad_transactions - - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles - - tests::signer::stackerdb_sign_after_signer_reboot + - tests::nakamoto_integrations::forked_tenure_is_ignored + - tests::nakamoto_integrations::nakamoto_attempt_time + - tests::signer::v0::block_proposal_rejection + - tests::signer::v0::miner_gather_signatures + - tests::signer::v0::mine_2_nakamoto_reward_cycles + - tests::signer::v0::end_of_tenure + - tests::signer::v0::forked_tenure_okay + - tests::signer::v0::forked_tenure_invalid + - tests::signer::v0::empty_sortition + - tests::signer::v0::bitcoind_forking_test + - tests::signer::v0::multiple_miners + - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::signer_set_rollover + - tests::signer::v0::miner_forking + - tests::signer::v0::reloads_signer_set_in - tests::nakamoto_integrations::stack_stx_burn_op_integration_test + - tests::nakamoto_integrations::check_block_heights + - tests::nakamoto_integrations::clarity_burn_state + - tests::nakamoto_integrations::check_block_times + - 
tests::nakamoto_integrations::check_block_info + - tests::nakamoto_integrations::check_block_info_rewards + - tests::nakamoto_integrations::continue_tenure_extend + - tests::nakamoto_integrations::mock_mining + - tests::nakamoto_integrations::multiple_miners # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower + # TODO: enable these once v1 signer is supported by a new nakamoto epoch + # - tests::signer::v1::dkg + # - tests::signer::v1::sign_request_rejected + # - tests::signer::v1::filter_bad_transactions + # - tests::signer::v1::delayed_dkg + # - tests::signer::v1::mine_2_nakamoto_reward_cycles + # - tests::signer::v1::sign_after_signer_reboot + # - tests::signer::v1::block_proposal steps: ## Setup test environment - name: Setup Test Environment diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 39048dc01b6..d1ae6522663 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,10 +14,6 @@ on: - "**.md" - "**.yml" workflow_dispatch: - inputs: - tag: - description: "The tag to create (optional)" - required: false pull_request: types: - opened @@ -34,7 +30,7 @@ concurrency: ## Always cancel duplicate jobs cancel-in-progress: true -run-name: ${{ inputs.tag }} +run-name: ${{ github.ref_name }} jobs: ## @@ -48,90 +44,99 @@ jobs: name: Rust Format runs-on: ubuntu-latest steps: - - name: Checkout the latest code - id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Setup Rust Toolchain - id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@f3c84ee10bf5a86e7a5d607d487bf17d57670965 # v1.5.0 - with: - components: rustfmt - cache: false - - name: Rustfmt id: rustfmt uses: stacks-network/actions/rustfmt@main with: alias: "fmt-stacks" + ###################################################################################### + ## Check if the branch that this workflow is being run against is a release branch + check-release: + name: Check Release + needs: + - rustfmt + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.check_release.outputs.tag }} + docker_tag: ${{ steps.check_release.outputs.docker_tag }} + is_release: ${{ steps.check_release.outputs.is_release }} + steps: + - name: Check Release + id: check_release + uses: stacks-network/actions/stacks-core/check-release@main + with: + tag: ${{ github.ref_name }} + ###################################################################################### ## Create a tagged github release ## - ## Runs when the following is true: - ## - tag is provided + ## Runs when: + ## - it is a release run create-release: if: | - inputs.tag != '' + needs.check-release.outputs.is_release == 'true' name: Create Release needs: - rustfmt + - check-release uses: ./.github/workflows/github-release.yml with: - tag: ${{ inputs.tag }} + tag: ${{ needs.check-release.outputs.tag }} + docker_tag: ${{ needs.check-release.outputs.docker_tag }} secrets: inherit ## Build and push Debian image built from source ## ## Runs when: - ## - tag is not provided + ## - it is not a release run docker-image: if: | - inputs.tag == '' + needs.check-release.outputs.is_release != 'true' name: Docker Image (Source) uses: ./.github/workflows/image-build-source.yml needs: - rustfmt + - check-release secrets: inherit ## Create a reusable cache for tests ## ## Runs when: - ## - tag is provided + ## - it is a release run ## or: - ## - no tag provided + ## - it is not a release run 
## and any of: ## - this workflow is called manually ## - PR is opened ## - commit to either (development, master) branch create-cache: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + needs.check-release.outputs.is_release == 'true' || ( + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Create Test Cache needs: - rustfmt + - check-release uses: ./.github/workflows/create-cache.yml ## Tests to run regularly ## ## Runs when: - ## - tag is provided + ## - it is a release run ## or: - ## - no tag provided + ## - it is not a release run ## and any of: ## - this workflow is called manually ## - PR is opened @@ -139,75 +144,75 @@ jobs: ## - commit to either (development, next, master) branch stacks-core-tests: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + needs.check-release.outputs.is_release == 'true' || ( + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Stacks Core Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/stacks-core-tests.yml bitcoin-tests: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + needs.check-release.outputs.is_release == 'true' || ( + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Bitcoin Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/bitcoin-tests.yml ## Test to run on a tagged release ## ## Runs when: - ## - tag is provided + ## - it is a release run atlas-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Atlas Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/atlas-tests.yml epoch-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Epoch Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/epoch-tests.yml slow-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Slow Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/slow-tests.yml - diff --git 
a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml index 45238084100..6bcd555ca9f 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout latest clarity js sdk id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} @@ -46,7 +46,7 @@ jobs: - name: Create Pull Request id: create_pr - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml index 7543bdd7507..8b005e0402c 100644 --- a/.github/workflows/docs-pr.yml +++ b/.github/workflows/docs-pr.yml @@ -36,7 +36,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Build docs id: build_docs @@ -46,7 +46,7 @@ jobs: - name: Checkout latest docs id: git_checkout_docs - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 with: token: ${{ secrets.DOCS_GITHUB_TOKEN }} repository: ${{ env.TARGET_REPOSITORY }} @@ -77,7 +77,7 @@ jobs: - name: Open PR id: open_pr if: ${{ steps.push.outputs.open_pr == '1' }} - uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 with: github-token: ${{ secrets.DOCS_GITHUB_TOKEN }} script: | diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 02243c4cbf0..9d4e18c6653 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -9,6 +9,10 @@ on: description: "Release Tag" required: true type: string + docker_tag: + description: "Docker Release Tag" + required: true + type: string secrets: GH_TOKEN: required: true @@ -48,25 +52,28 @@ jobs: ## Downloads the artifacts built in `create-source-binary.yml` - name: Download Artifacts id: download_artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: - name: artifact + pattern: ${{ inputs.tag }}-binary-build-* path: release + merge-multiple: true ## Generate a checksums file to be added to the release page - name: Generate Checksums id: generate_checksum uses: stacks-network/actions/generate-checksum@main + with: + artifact_download_pattern: "${{ inputs.tag }}-binary-build-*" ## Upload the release archives with the checksums file - name: Upload Release id: upload_release - uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 #v0.1.15 + uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 #v2.0.5 env: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} with: - name: Release ${{ github.event.inputs.tag || github.ref }} - tag_name: ${{ github.event.inputs.tag || github.ref }} + name: Release ${{ inputs.tag || github.ref }} + tag_name: ${{ inputs.tag || github.ref }} draft: false 
prerelease: true fail_on_unmatched_files: true @@ -91,4 +98,5 @@ jobs: - create-release with: tag: ${{ inputs.tag }} + docker_tag: ${{ inputs.docker_tag }} secrets: inherit diff --git a/.github/workflows/image-build-binary.yml b/.github/workflows/image-build-binary.yml index 74415e7f16a..5966d7e68a4 100644 --- a/.github/workflows/image-build-binary.yml +++ b/.github/workflows/image-build-binary.yml @@ -6,6 +6,10 @@ on: workflow_call: inputs: tag: + required: true + type: string + description: "Version tag of release" + docker_tag: required: true type: string description: "Version tag for docker images" @@ -57,34 +61,81 @@ jobs: run: | echo "docker-org=${{ github.repository_owner }}" >> "$GITHUB_ENV" + - name: Check Signer Release + id: check_signer_release + run: | + case "${{ inputs.tag }}" in + signer-*) + echo "is-signer-release=true" >> $GITHUB_ENV + ;; + *) + echo "is-signer-release=false" >> $GITHUB_ENV + ;; + esac + ## Set docker metatdata ## - depending on the matrix.dist, different tags will be enabled ## ex. debian will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'debian' }}` - name: Docker Metadata ( ${{matrix.dist}} ) - id: docker_metadata - uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 #v5.0.0 + if: ${{ env.is-signer-release == 'true' }} + id: docker_metadata_signer + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 + with: + images: | + ${{env.docker-org}}/stacks-signer + tags: | + type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} + type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} + type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} + type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} + + - name: Docker Metadata ( ${{matrix.dist}} ) + if: ${{ env.is-signer-release == 'false' }} + id: docker_metadata_node + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 with: ## tag images with current repo name `stacks-core` as well as legacy `stacks-blockchain` images: | ${{env.docker-org}}/${{ github.event.repository.name }} ${{env.docker-org}}/stacks-blockchain tags: | - type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'debian'}} - type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' && matrix.dist == 'debian' }} + type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} + type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} - type=raw,value=latest-${{ matrix.dist 
}},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'alpine' }} + type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} + + ## Build docker image for signer release + - name: Build and Push ( ${{matrix.dist}} ) + if: ${{ env.is-signer-release == 'true' }} + id: docker_build_signer + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary + platforms: ${{ env.docker_platforms }} + tags: ${{ steps.docker_metadata_signer.outputs.tags }} + labels: ${{ steps.docker_metadata_signer.outputs.labels }} + build-args: | + TAG=${{ inputs.tag }} + REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: ${{ env.DOCKER_PUSH }} - ## Build docker image for release + ## Build docker image for node release - name: Build and Push ( ${{matrix.dist}} ) - id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + if: ${{ env.is-signer-release == 'false' }} + id: docker_build_node + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary platforms: ${{ env.docker_platforms }} - tags: ${{ steps.docker_metadata.outputs.tags }} - labels: ${{ steps.docker_metadata.outputs.labels }} + tags: ${{ steps.docker_metadata_node.outputs.tags }} + labels: ${{ steps.docker_metadata_node.outputs.labels }} build-args: | TAG=${{ inputs.tag }} REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index ebb9afc6790..e45455f05b6 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -49,7 +49,7 @@ jobs: ## Set docker metatdata - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata - uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 #v5.0.0 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 with: images: | ${{env.docker-org}}/${{ github.event.repository.name }} @@ -61,7 +61,7 @@ jobs: ## Build docker image - name: Build and Push ( ${{matrix.dist}} ) id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{matrix.dist}}-source platforms: ${{ env.docker_platforms }} diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml new file mode 100644 index 00000000000..83b5a6b63f1 --- /dev/null +++ b/.github/workflows/lock-threads.yml @@ -0,0 +1,36 @@ +## Workflow to lock closed PRs/issues/discussions +## timeframe to lock defaults to: +## issues: 30 days +## prs: 30 days +## discussions: 365 days + +name: "Lock Threads" + +on: + schedule: + - cron: "0 0 
* * *" + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + discussions: write + +concurrency: + group: lock-threads + +jobs: + ## Lock closed issues/prs/discussions + lock: + name: Lock Threads + runs-on: ubuntu-latest + steps: + - name: Lock Threads + id: lock_threads + uses: stacks-network/actions/lock-threads@main + with: + github-token: ${{ secrets.GH_TOKEN }} + issue-inactive-days: 7 + pr-inactive-days: 7 + discussion-inactive-days: 7 + diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a7256873..788e4ccb520 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,11 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: + inputs: + automated: + description: "Set to 'false' to ignore mutants limit." + required: true concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -33,7 +38,9 @@ jobs: steps: - id: check_packages_and_shards - uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing + with: + automated: ${{ inputs.automated }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -49,7 +56,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'small' @@ -72,7 +79,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'small' @@ -94,7 +101,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stackslib' @@ -120,7 +127,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'stackslib' @@ -142,7 +149,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-node' @@ -168,7 +175,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'stacks-node' @@ -186,7 +193,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-signer' @@ -211,7 +218,7 @@ jobs: steps: - name: Output Mutants - uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main + uses: 
stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing with: stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }} shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }} diff --git a/.github/workflows/slow-tests.yml b/.github/workflows/slow-tests.yml index bce6a15a1f0..02c5bdf552d 100644 --- a/.github/workflows/slow-tests.yml +++ b/.github/workflows/slow-tests.yml @@ -32,6 +32,7 @@ jobs: test-name: - tests::epoch_21::test_pox_reorg_flap_duel - tests::epoch_21::test_pox_reorg_flap_reward_cycles + - tests::nakamoto_integrations::check_block_info_rewards steps: ## Setup test environment - name: Setup Test Environment diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 3195f279fcb..98eb5cf92c0 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -127,7 +127,7 @@ jobs: ## checkout the code - name: Checkout the latest code id: git_checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Run network relay tests id: nettest @@ -145,10 +145,10 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Execute core contract unit tests with clarinet-sdk id: clarinet_unit_test - uses: actions/setup-node@v3 + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: node-version: 18.x cache: "npm" @@ -174,7 +174,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 - name: Execute core contract unit tests in Clarinet id: clarinet_unit_test_v1 uses: docker://hirosystems/clarinet:1.7.1 @@ -187,7 +187,6 @@ jobs: if: always() needs: - full-genesis - - unit-tests - open-api-validation - core-contracts-clarinet-test steps: diff --git a/.vscode/extensions.json b/.vscode/extensions.json index be7e11c2a88..00035443cbf 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -2,7 +2,7 @@ "recommendations": [ "rust-lang.rust-analyzer", "vadimcn.vscode-lldb", - "serayuzgur.crates", + "fill-labs.dependi", "editorconfig.editorconfig", ] } diff --git a/CHANGELOG.md b/CHANGELOG.md index c318efebc87..eb6061cb9d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,62 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + +- Added support for Clarity 3 + - Keywords / variable + - `tenure-height` added + - `stacks-block-height` added + - `block-height` removed + - Functions + - `get-stacks-block-info?` added + - `get-tenure-info?` added + - `get-block-info?` removed + +## [2.5.0.0.5] + +### Added + +- Added configuration option `connections.antientropy_retry` (#4932) + +### Changed + +- Set default antientropy_retry to run once per hour (#4935) + +## [2.5.0.0.4] + +### Added + +- Adds the solo stacking scenarios to the stateful property-based testing strategy for PoX-4 (#4725) +- Add signer-key to synthetic stack-aggregation-increase event (#4728) +- Implement the assumed total commit with carry-over (ATC-C) strategy for denying opportunistic Bitcoin miners from mining Stacks at a discount (#4733) +- Adding support for stacks-block-height and tenure-height in Clarity 3 (#4745) +- Preserve PeerNetwork struct when transitioning to 3.0 (#4767) +- Implement signer monitor server error (#4773) +- Pull current stacks signer out into v1 implementation and create placeholder v0 mod (#4778) +- Create new block signature message type for v0 signer (#4787) +- Isolate the rusqlite dependency in stacks-common and clarity behind a cargo feature (#4791) +- Add next_initiative_delay config option to control how frequently the miner checks if a new burnchain block has been processed (#4795) +- Various performance improvements and cleanup + +### Changed + +- Downgraded log messages about transactions from warning to info (#4697) +- Fix race condition between the signer binary and the /v2/pox endpoint (#4738) +- Make node config mock_miner item hot-swappable (#4743) +- Mandates that a burnchain block header be resolved by a BurnchainHeaderReader, which will resolve a block height to at most one burnchain header (#4748) +- Optional config option to resolve DNS of bootstrap nodes (#4749) +- Limit inventory syncs with new peers (#4750) +- Update /v2/fees/transfer to report the median transaction fee estimate for a STX-transfer of 180 bytes (#4754) +- Reduce connection spamming in stackerdb (#4759) +- Remove deprecated signer cli commands (#4772) +- An extra pair of signer slots was introduced at the epoch 2.5 boundary (#4845, #4868, #4891) +- Never consider Stacks chain tips that are not on the canonical burn chain (#4886, #4893) + +### Fixed + +- Allow Nakamoto blocks to access the burn block associated with the current tenure (#4333) + ## [2.5.0.0.3] This release fixes a regression in `2.5.0.0.0` from `2.4.0.1.0` caused by git merge @@ -22,7 +78,6 @@ This is the first consensus-critical release for Nakamoto. Nodes which do not up **This is a required release before Nakamoto rules are enabled in 3.0.** - ### Timing of Release from 2.5 to 3.0 Activating Nakamoto will include two epochs: diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..b30973662f6 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,20 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# @stacks-network/blockchain-team-codeowners will be requested for +# review when someone opens a pull request. +* @stacks-network/blockchain-team-codeowners + +# Generic file extensions that shouldn't require much scrutiny.
Anyone with write access to the repo may approve a PR +*.md @stacks-network/blockchain-team +*.yml @stacks-network/blockchain-team +*.yaml @stacks-network/blockchain-team +*.txt @stacks-network/blockchain-team +*.toml @stacks-network/blockchain-team + +# Signer code +libsigner/**/*.rs @stacks-network/blockchain-team-signer +stacks-signer/**/*.rs @stacks-network/blockchain-team-signer + +# CI workflows +/.github/workflows/ @stacks-network/blockchain-team-ci +/.github/actions/ @stacks-network/blockchain-team-ci diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7609cba29f7..5bdfbfc8eac 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,6 +43,11 @@ Branch names should use a prefix that conveys the overall goal of the branch: - `test/more-coverage` for branches that only add more tests - `refactor/formatting-fix` for refactors +The branch suffix must only include ASCII lowercase and uppercase letters, +digits, underscores, periods and dashes. + +The full branch name must be at most 128 characters long. + ### Merging PRs from Forks PRs from forks or opened by contributors without commit access require diff --git a/Cargo.lock b/Cargo.lock index 9cfd1ad9a1e..357a9def709 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,12 +112,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "ahash" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" - [[package]] name = "ahash" version = "0.8.8" @@ -717,7 +711,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff", - "hashbrown 0.14.3", + "hashbrown", "integer-sqrt", "lazy_static", "mutants", @@ -1188,9 +1182,9 @@ dependencies = [ [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -1495,33 +1489,24 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash 0.4.8", -] - [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.8", + "ahash", "allocator-api2", "serde", ] [[package]] name = "hashlink" -version = "0.6.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d99cf782f0dc4372d26846bec3de7804ceb5df083c2d4462c0b8d2330e894fa8" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.9.1", + "hashbrown", ] [[package]] @@ -1758,7 +1743,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -1920,9 +1905,12 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown 0.14.3", + "hashbrown", + "lazy_static", "libc", "libstackerdb", +
"mutants", + "prometheus", "rand 0.8.5", "rand_core 0.6.4", "secp256k1", @@ -1943,9 +1931,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.20.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d31059f22935e6c31830db5249ba2b7ecd54fd73a9909286f0a67aa55c2fbd" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" dependencies = [ "cc", "pkg-config", @@ -2446,6 +2434,7 @@ name = "pox-locking" version = "2.4.0" dependencies = [ "clarity", + "mutants", "slog", "stacks-common", ] @@ -2718,7 +2707,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -2870,17 +2859,15 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.24.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38ee71cbab2c827ec0ac24e76f82eca723cee92c509a65f67dee393c25112" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 1.3.2", - "byteorder", + "bitflags 2.4.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "serde_json", "smallvec", ] @@ -3291,7 +3278,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3304,7 +3291,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3376,7 +3363,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown", "lazy_static", "libc", "nix", @@ -3412,7 +3399,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown 0.14.3", + "hashbrown", "http-types", "lazy_static", "libc", @@ -3449,11 +3436,13 @@ dependencies = [ "backoff", "clap 4.5.0", "clarity", - "hashbrown 0.14.3", + "hashbrown", + "lazy_static", "libsigner", "libstackerdb", "num-traits", "polynomial", + "prometheus", "rand 0.8.5", "rand_core 0.6.4", "reqwest", @@ -3469,6 +3458,7 @@ dependencies = [ "stacks-common", "stackslib", "thiserror", + "tiny_http", "toml 0.5.11", "tracing", "tracing-subscriber", @@ -3486,7 +3476,7 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown", "integer-sqrt", "lazy_static", "libc", @@ -3770,9 +3760,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -3782,7 +3772,7 @@ dependencies = [ "powerfmt", "serde", "time-core", - "time-macros 0.2.17", + "time-macros 0.2.18", ] [[package]] @@ -3803,9 +3793,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -4604,7 +4594,7 @@ checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" dependencies = [ "aes-gcm 0.10.3", "bs58 0.5.0", - "hashbrown 0.14.3", + "hashbrown", "hex", "num-traits", "p256k1", diff --git a/Cargo.toml b/Cargo.toml 
index feab983833c..8ac168f1f7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" wsts = { version = "9.0.0", default-features = false } +rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/Dockerfile b/Dockerfile index 055cc3df764..5cfacc8ab08 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN apk add --no-cache musl-dev RUN mkdir /out -RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json --release +RUN cargo build --features monitoring_prom,slog_json --release RUN cp target/release/stacks-node /out diff --git a/Dockerfile.debian b/Dockerfile.debian index 8b6759527ed..ce219640391 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -10,7 +10,7 @@ COPY . . RUN mkdir /out -RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json --release +RUN cargo build --features monitoring_prom,slog_json --release RUN cp target/release/stacks-node /out diff --git a/README.md b/README.md index 3f91b1a9f21..6cdb42857f4 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ _Note on building_: you may set `RUSTFLAGS` to build binaries for your native cp RUSTFLAGS="-Ctarget-cpu=native" ``` -or uncomment these lines in `./cargo/config`: +or uncomment these lines in `./.cargo/config.toml`: ``` # [build] @@ -87,7 +87,7 @@ cd testnet/stacks-node cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml ``` -_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ +_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ Additional testnet documentation is available [here](./docs/testnet.md) and [here](https://docs.stacks.co/docs/nodes-and-miners/miner-testnet) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 70cbcec5857..284e856e498 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -27,33 +27,33 @@ regex = "1" lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common" } +stacks_common = { package = "stacks-common", path = "../stacks-common", optional = true, default-features = false } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } -mutants = "0.0.3" +rusqlite = { workspace = true, optional = true} [dependencies.serde_json] version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] -[dependencies.rusqlite] -version = "=0.24.2" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.time] version = "0.2.23" features = ["std"] [dev-dependencies] assert-json-diff = "1.0.0" +mutants = "0.0.3" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now.
# criterion = "0.3" [features] -default = [] -developer-mode = [] +default = ["canonical"] +canonical = ["rusqlite", "stacks_common/canonical"] +developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] -testing = [] +testing = ["canonical"] devtools = [] +rollback_value_check = [] +disable-costs = [] diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index 5595905a484..aa69f650f01 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -148,7 +148,7 @@ impl<'a> ArithmeticOnlyChecker<'a> { { match native_var { ContractCaller | TxSender | TotalLiquidMicroSTX | BlockHeight | BurnBlockHeight - | Regtest | TxSponsor | Mainnet | ChainId => { + | Regtest | TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight => { Err(Error::VariableForbidden(native_var)) } NativeNone | NativeTrue | NativeFalse => Ok(()), @@ -174,13 +174,12 @@ impl<'a> ArithmeticOnlyChecker<'a> { ) -> Result<(), Error> { use crate::vm::functions::NativeFunctions::*; match function { - FetchVar | GetBlockInfo | GetBurnBlockInfo | GetTokenBalance | GetAssetOwner - | FetchEntry | SetEntry | DeleteEntry | InsertEntry | SetVar | MintAsset - | MintToken | TransferAsset | TransferToken | ContractCall | StxTransfer - | StxTransferMemo | StxBurn | AtBlock | GetStxBalance | GetTokenSupply | BurnToken - | FromConsensusBuff | ToConsensusBuff | BurnAsset | StxGetAccount => { - Err(Error::FunctionNotPermitted(function)) - } + FetchVar | GetBlockInfo | GetBurnBlockInfo | GetStacksBlockInfo | GetTenureInfo + | GetTokenBalance | GetAssetOwner | FetchEntry | SetEntry | DeleteEntry + | InsertEntry | SetVar | MintAsset | MintToken | TransferAsset | TransferToken + | ContractCall | StxTransfer | StxTransferMemo | StxBurn | AtBlock | GetStxBalance + | GetTokenSupply | BurnToken | FromConsensusBuff | ToConsensusBuff | BurnAsset + | StxGetAccount => Err(Error::FunctionNotPermitted(function)), Append | Concat | AsMaxLen | ContractOf | PrincipalOf | ListCons | Print | AsContract | ElementAt | ElementAtAlias | IndexOf | IndexOfAlias | Map | Filter | Fold | Slice | ReplaceAt => Err(Error::FunctionNotPermitted(function)), diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 4ad02c08d5e..0e7d520cb3b 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -22,12 +22,13 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::arithmetic_checker::Error::*; use crate::vm::analysis::arithmetic_checker::{ArithmeticOnlyChecker, Error}; -use crate::vm::analysis::{mem_type_check, ContractAnalysis}; +use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::functions::define::DefineFunctions; use crate::vm::functions::NativeFunctions; use crate::vm::tests::test_clarity_versions; +use crate::vm::tooling::mem_type_check; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::variables::NativeVariables; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 71fefb64571..f86308f8d9c 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -132,10 +132,15 @@ pub enum CheckErrors { // get-block-info? 
errors NoSuchBlockInfoProperty(String), NoSuchBurnBlockInfoProperty(String), + NoSuchStacksBlockInfoProperty(String), + NoSuchTenureInfoProperty(String), GetBlockInfoExpectPropertyName, GetBurnBlockInfoExpectPropertyName, + GetStacksBlockInfoExpectPropertyName, + GetTenureInfoExpectPropertyName, NameAlreadyUsed(String), + ReservedWord(String), // expect a function, or applying a function to a list NonFunctionApplication, @@ -405,9 +410,14 @@ impl DiagnosableError for CheckErrors { CheckErrors::ExpectedCallableType(found_type) => format!("expected a callable contract, found {}", found_type), CheckErrors::NoSuchBlockInfoProperty(property_name) => format!("use of block unknown property '{}'", property_name), CheckErrors::NoSuchBurnBlockInfoProperty(property_name) => format!("use of burn block unknown property '{}'", property_name), + CheckErrors::NoSuchStacksBlockInfoProperty(property_name) => format!("use of unknown stacks block property '{}'", property_name), + CheckErrors::NoSuchTenureInfoProperty(property_name) => format!("use of unknown tenure property '{}'", property_name), CheckErrors::GetBlockInfoExpectPropertyName => "missing property name for block info introspection".into(), CheckErrors::GetBurnBlockInfoExpectPropertyName => "missing property name for burn block info introspection".into(), + CheckErrors::GetStacksBlockInfoExpectPropertyName => "missing property name for stacks block info introspection".into(), + CheckErrors::GetTenureInfoExpectPropertyName => "missing property name for tenure info introspection".into(), CheckErrors::NameAlreadyUsed(name) => format!("defining '{}' conflicts with previous value", name), + CheckErrors::ReservedWord(name) => format!("{name} is a reserved word"), CheckErrors::NonFunctionApplication => "expecting expression of type function".into(), CheckErrors::ExpectedListApplication => "expecting expression of type list".into(), CheckErrors::ExpectedSequence(found_type) => format!("expecting expression of type 'list', 'buff', 'string-ascii' or 'string-utf8' - found '{}'", found_type), diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 4da10f88bf9..6a8f64f1b26 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -37,12 +37,15 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -use crate::vm::database::{MemoryBackingStore, STORE_CONTRACT_SRC_INTERFACE}; +#[cfg(feature = "canonical")] +use crate::vm::database::MemoryBackingStore; +use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used by CLI tools like the docs generator. 
Not used in production +#[cfg(feature = "canonical")] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs index b02923c1a1a..006b4f0cfe6 100644 --- a/clarity/src/vm/analysis/read_only_checker/mod.rs +++ b/clarity/src/vm/analysis/read_only_checker/mod.rs @@ -290,10 +290,11 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { | BuffToUIntBe | IntToAscii | IntToUtf8 | StringToInt | StringToUInt | IsStandard | ToConsensusBuff | PrincipalDestruct | PrincipalConstruct | Append | Concat | AsMaxLen | ContractOf | PrincipalOf | ListCons | GetBlockInfo | GetBurnBlockInfo - | TupleGet | TupleMerge | Len | Print | AsContract | Begin | FetchVar - | GetStxBalance | StxGetAccount | GetTokenBalance | GetAssetOwner | GetTokenSupply - | ElementAt | IndexOf | Slice | ReplaceAt | BitwiseAnd | BitwiseOr | BitwiseNot - | BitwiseLShift | BitwiseRShift | BitwiseXor2 | ElementAtAlias | IndexOfAlias => { + | GetStacksBlockInfo | GetTenureInfo | TupleGet | TupleMerge | Len | Print + | AsContract | Begin | FetchVar | GetStxBalance | StxGetAccount | GetTokenBalance + | GetAssetOwner | GetTokenSupply | ElementAt | IndexOf | Slice | ReplaceAt + | BitwiseAnd | BitwiseOr | BitwiseNot | BitwiseLShift | BitwiseRShift | BitwiseXor2 + | ElementAtAlias | IndexOfAlias => { // Check all arguments. self.check_each_expression_is_read_only(args) } diff --git a/clarity/src/vm/analysis/tests/mod.rs b/clarity/src/vm/analysis/tests/mod.rs index adb36b94fbd..2484ee86cd3 100644 --- a/clarity/src/vm/analysis/tests/mod.rs +++ b/clarity/src/vm/analysis/tests/mod.rs @@ -14,10 +14,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use stacks_common::types::StacksEpochId; + use crate::vm::analysis::errors::CheckErrors; use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; -use crate::vm::analysis::{type_check, AnalysisDatabase, ContractAnalysis}; +use crate::vm::analysis::{ + mem_type_check as mem_run_analysis, type_check, AnalysisDatabase, ContractAnalysis, +}; use crate::vm::ast::parse; +use crate::vm::ClarityVersion; #[test] fn test_list_types_must_match() { @@ -202,18 +207,87 @@ fn test_contract_call_expect_name() { #[test] fn test_no_such_block_info_property() { let snippet = "(get-block-info? unicorn 1)"; - let err = mem_type_check(snippet).unwrap_err(); + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); assert!(format!("{}", err.diagnostic).contains("use of block unknown property 'unicorn'")); } +#[test] +fn test_no_such_stacks_block_info_property() { + let snippet = "(get-stacks-block-info? unicorn 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("use of unknown stacks block property 'unicorn'") + ); +} + +#[test] +fn test_no_such_tenure_info_property() { + let snippet = "(get-tenure-info? unicorn 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic).contains("use of unknown tenure property 'unicorn'")); +} + #[test] fn test_get_block_info_expect_property_name() { let snippet = "(get-block-info? 
0 1)"; - let err = mem_type_check(snippet).unwrap_err(); + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); assert!(format!("{}", err.diagnostic) .contains("missing property name for block info introspection")); } +#[test] +fn test_get_stacks_block_info_expect_property_name() { + let snippet = "(get-stacks-block-info? 0 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic) + .contains("missing property name for stacks block info introspection")); +} + +#[test] +fn test_get_tenure_info_expect_property_name() { + let snippet = "(get-tenure-info? 0 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic) + .contains("missing property name for tenure info introspection")); +} + +#[test] +fn test_no_such_block_info_height() { + let snippet = "(get-block-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); + println!("{}", err.diagnostic); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + +#[test] +fn test_no_such_stacks_block_info_height() { + let snippet = "(get-stacks-block-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + +#[test] +fn test_no_such_tenure_info_height() { + let snippet = "(get-tenure-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + #[test] fn test_name_already_used() { let snippet = "(define-constant var1 true) (define-constant var1 1)"; diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index bc3f9962841..b1d9bdb2228 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -1463,7 +1463,7 @@ fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions _ => panic!("{:?}", err), }; } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), _ => panic!("got {:?}", result), } } diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index c8185dde74e..800347d0f01 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -84,3 +84,21 @@ impl FunctionType { } } } + +fn is_reserved_word_v3(word: &str) -> bool { + match word { + "block-height" => true, + _ => false, + } +} + +/// Is this a reserved word that should trigger an analysis error for the given +/// Clarity version? Note that most of the reserved words do not trigger an +/// analysis error, but will trigger an error at runtime. This should likely be +/// changed in a future Clarity version. 
+pub fn is_reserved_word(word: &str, version: ClarityVersion) -> bool { + match version { + ClarityVersion::Clarity1 | ClarityVersion::Clarity2 => false, + ClarityVersion::Clarity3 => is_reserved_word_v3(word), + } +} diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index d66cad5d4ee..2b913a3ac9c 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -323,9 +323,9 @@ fn type_reserved_variable(variable_name: &str) -> CheckResult TypeSignature::BoolType, TotalLiquidMicroSTX => TypeSignature::UIntType, Regtest => TypeSignature::BoolType, - TxSponsor | Mainnet | ChainId => { + TxSponsor | Mainnet | ChainId | StacksBlockHeight | TenureHeight => { return Err(CheckErrors::Expects( - "tx-sponsor, mainnet, and chain-id should not reach here in 2.05".into(), + "tx-sponsor, mainnet, chain-id, stacks-block-height, and tenure-height should not reach here in 2.05".into(), ) .into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index b38cfd0d115..201c307986e 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -773,9 +773,9 @@ impl TypedNativeFunction { | StringToUInt | IntToAscii | IntToUtf8 | GetBurnBlockInfo | StxTransferMemo | StxGetAccount | BitwiseAnd | BitwiseOr | BitwiseNot | BitwiseLShift | BitwiseRShift | BitwiseXor2 | Slice | ToConsensusBuff | FromConsensusBuff - | ReplaceAt => { + | ReplaceAt | GetStacksBlockInfo | GetTenureInfo => { return Err(CheckErrors::Expects( - "Clarity 2 keywords should not show up in 2.05".into(), + "Clarity 2+ keywords should not show up in 2.05".into(), ) .into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs index df9c35ed0e5..5cfc9ab9922 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/assets.rs @@ -17,9 +17,10 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::errors::CheckErrors; -use crate::vm::analysis::{mem_type_check, AnalysisDatabase}; +use crate::vm::analysis::AnalysisDatabase; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tooling::mem_type_check; use crate::vm::types::{ QualifiedContractIdentifier, SequenceSubtype, StringSubtype, TypeSignature, }; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs index 8cbed1a416d..d210194ea40 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs @@ -19,6 +19,7 @@ use std::collections::BTreeMap; use hashbrown::{HashMap, HashSet}; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; +use crate::vm::analysis::type_checker::is_reserved_word; use crate::vm::analysis::types::ContractAnalysis; use crate::vm::contexts::MAX_CONTEXT_DEPTH; use crate::vm::representations::{ClarityName, SymbolicExpression}; @@ -42,7 +43,7 @@ impl TraitContext { pub fn new(clarity_version: ClarityVersion) -> TraitContext { match clarity_version { ClarityVersion::Clarity1 => Self::Clarity1(HashMap::new()), - ClarityVersion::Clarity2 => Self::Clarity2 { + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => Self::Clarity2 { defined: HashSet::new(), all: 
HashMap::new(), }, @@ -128,6 +129,7 @@ impl TraitContext { } pub struct ContractContext { + clarity_version: ClarityVersion, contract_identifier: QualifiedContractIdentifier, map_types: HashMap, variable_types: HashMap, @@ -147,6 +149,7 @@ impl ContractContext { clarity_version: ClarityVersion, ) -> ContractContext { ContractContext { + clarity_version, contract_identifier, variable_types: HashMap::new(), private_function_types: HashMap::new(), @@ -168,6 +171,10 @@ impl ContractContext { } pub fn check_name_used(&self, name: &str) -> CheckResult<()> { + if is_reserved_word(name, self.clarity_version) { + return Err(CheckError::new(CheckErrors::ReservedWord(name.to_string()))); + } + if self.variable_types.contains_key(name) || self.persisted_variable_types.contains_key(name) || self.private_function_types.contains_key(name) @@ -279,6 +286,10 @@ impl ContractContext { trait_name: ClarityName, trait_signature: BTreeMap, ) -> CheckResult<()> { + if self.clarity_version >= ClarityVersion::Clarity3 { + self.check_name_used(&trait_name)?; + } + self.traits.add_defined_trait( self.contract_identifier.clone(), trait_name, @@ -292,6 +303,10 @@ impl ContractContext { trait_id: TraitIdentifier, trait_signature: BTreeMap, ) -> CheckResult<()> { + if self.clarity_version >= ClarityVersion::Clarity3 { + self.check_name_used(&alias)?; + } + self.traits.add_used_trait(alias, trait_id, trait_signature) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index b61d3bb6e28..7caf775c19b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -37,6 +37,7 @@ use crate::vm::costs::{ analysis_typecheck_cost, cost_functions, runtime_cost, ClarityCostFunctionReference, CostErrors, CostOverflowingMath, CostTracker, ExecutionCost, LimitedCostTracker, }; +use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::define::DefineFunctionsParsed; use crate::vm::functions::NativeFunctions; use crate::vm::representations::SymbolicExpressionType::{ @@ -151,7 +152,130 @@ impl TypeChecker<'_, '_> { pub type TypeResult = CheckResult; +pub fn compute_typecheck_cost( + track: &mut T, + t1: &TypeSignature, + t2: &TypeSignature, +) -> Result { + let t1_size = t1.type_size().map_err(|_| CostErrors::CostOverflow)?; + let t2_size = t2.type_size().map_err(|_| CostErrors::CostOverflow)?; + track.compute_cost( + ClarityCostFunction::AnalysisTypeCheck, + &[std::cmp::max(t1_size, t2_size).into()], + ) +} + +pub fn check_argument_len(expected: usize, args_len: usize) -> Result<(), CheckErrors> { + if args_len != expected { + Err(CheckErrors::IncorrectArgumentCount(expected, args_len)) + } else { + Ok(()) + } +} + impl FunctionType { + pub fn check_args_visitor_2_1( + &self, + accounting: &mut T, + arg_type: &TypeSignature, + arg_index: usize, + accumulated_type: Option<&TypeSignature>, + ) -> ( + Option>, + CheckResult>, + ) { + match self { + // variadic stops checking cost at the first error... 
+ FunctionType::Variadic(expected_type, _) => { + let cost = Some(compute_typecheck_cost(accounting, expected_type, arg_type)); + let admitted = match expected_type.admits_type(&StacksEpochId::Epoch21, arg_type) { + Ok(admitted) => admitted, + Err(e) => return (cost, Err(e.into())), + }; + if !admitted { + return ( + cost, + Err(CheckErrors::TypeError(expected_type.clone(), arg_type.clone()).into()), + ); + } + (cost, Ok(None)) + } + FunctionType::ArithmeticVariadic => { + let cost = Some(compute_typecheck_cost( + accounting, + &TypeSignature::IntType, + arg_type, + )); + if arg_index == 0 { + let return_type = match arg_type { + TypeSignature::IntType => Ok(Some(TypeSignature::IntType)), + TypeSignature::UIntType => Ok(Some(TypeSignature::UIntType)), + _ => Err(CheckErrors::UnionTypeError( + vec![TypeSignature::IntType, TypeSignature::UIntType], + arg_type.clone(), + ) + .into()), + }; + (cost, return_type) + } else { + let return_type = accumulated_type + .ok_or_else(|| CheckErrors::Expects("Failed to set accumulated type for arg indices >= 1 in variadic arithmetic".into()).into()); + let check_result = return_type.and_then(|return_type| { + if arg_type != return_type { + Err( + CheckErrors::TypeError(return_type.clone(), arg_type.clone()) + .into(), + ) + } else { + Ok(None) + } + }); + (cost, check_result) + } + } + // For the fixed function types, the visitor will just + // tell the processor that any results greater than the args len + // do not need to be stored, because an error will occur before + // further checking anyways + FunctionType::Fixed(FixedFunction { + args: arg_types, .. + }) => { + if arg_index >= arg_types.len() { + // note: argument count will be wrong? + return ( + None, + Err(CheckErrors::IncorrectArgumentCount(arg_types.len(), arg_index).into()), + ); + } + return (None, Ok(None)); + } + // For the following function types, the visitor will just + // tell the processor that any results greater than len 1 or 2 + // do not need to be stored, because an error will occur before + // further checking anyways + FunctionType::ArithmeticUnary | FunctionType::UnionArgs(..) => { + if arg_index >= 1 { + return ( + None, + Err(CheckErrors::IncorrectArgumentCount(1, arg_index).into()), + ); + } + return (None, Ok(None)); + } + FunctionType::ArithmeticBinary + | FunctionType::ArithmeticComparison + | FunctionType::Binary(..) 
=> { + if arg_index >= 2 { + return ( + None, + Err(CheckErrors::IncorrectArgumentCount(2, arg_index).into()), + ); + } + return (None, Ok(None)); + } + } + } + pub fn check_args_2_1( &self, accounting: &mut T, @@ -858,6 +982,8 @@ fn type_reserved_variable( .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, ContractCaller => TypeSignature::PrincipalType, BlockHeight => TypeSignature::UIntType, + StacksBlockHeight => TypeSignature::UIntType, + TenureHeight => TypeSignature::UIntType, BurnBlockHeight => TypeSignature::UIntType, NativeNone => TypeSignature::new_option(no_type()) .map_err(|_| CheckErrors::Expects("Bad construction".into()))?, @@ -1015,17 +1141,23 @@ impl<'a, 'b> TypeChecker<'a, 'b> { args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - let mut types_returned = self.type_check_all(args, context)?; - - let last_return = types_returned - .pop() - .ok_or(CheckError::new(CheckErrors::CheckerImplementationFailure))?; - - for type_return in types_returned.iter() { - if type_return.is_response_type() { - return Err(CheckErrors::UncheckedIntermediaryResponses.into()); + let mut last_return = None; + let mut return_failure = Ok(()); + for ix in 0..args.len() { + let type_return = self.type_check(&args[ix], context)?; + if ix + 1 < args.len() { + if type_return.is_response_type() { + return_failure = Err(CheckErrors::UncheckedIntermediaryResponses); + } + } else { + last_return = Some(type_return); } } + + let last_return = last_return + .ok_or_else(|| CheckError::new(CheckErrors::CheckerImplementationFailure))?; + return_failure?; + Ok(last_return) } @@ -1050,8 +1182,56 @@ impl<'a, 'b> TypeChecker<'a, 'b> { epoch: StacksEpochId, clarity_version: ClarityVersion, ) -> TypeResult { - let typed_args = self.type_check_all(args, context)?; - func_type.check_args(self, &typed_args, epoch, clarity_version) + if epoch <= StacksEpochId::Epoch2_05 { + let typed_args = self.type_check_all(args, context)?; + return func_type.check_args(self, &typed_args, epoch, clarity_version); + } + // use func_type visitor pattern + let mut accumulated_type = None; + let mut total_costs = vec![]; + let mut check_result = Ok(()); + let mut accumulated_types = Vec::new(); + for (arg_ix, arg_expr) in args.iter().enumerate() { + let arg_type = self.type_check(arg_expr, context)?; + if check_result.is_ok() { + let (costs, result) = func_type.check_args_visitor_2_1( + self, + &arg_type, + arg_ix, + accumulated_type.as_ref(), + ); + // add the accumulated type and total cost *before* + // checking for an error: we want the subsequent error handling + // to account for this cost + accumulated_types.push(arg_type); + total_costs.extend(costs); + + match result { + Ok(Some(returned_type)) => { + accumulated_type = Some(returned_type); + } + Ok(None) => {} + Err(e) => { + check_result = Err(e); + } + }; + } + } + if let Err(mut check_error) = check_result { + if let CheckErrors::IncorrectArgumentCount(expected, _actual) = check_error.err { + check_error.err = CheckErrors::IncorrectArgumentCount(expected, args.len()); + check_error.diagnostic = Diagnostic::err(&check_error.err) + } + // accumulate the checking costs + // the reason we do this now (instead of within the loop) is for backwards compatibility + for cost in total_costs.into_iter() { + self.add_cost(cost?)?; + } + + return Err(check_error); + } + // otherwise, just invoke the normal checking routine + func_type.check_args(self, &accumulated_types, epoch, clarity_version) } fn get_function_type(&self, function_name: &str) -> Option 
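The visitor refactor above preserves one subtle invariant: every argument expression is still type-checked after the first failure, but admission work stops, and the accumulated checking costs are charged before the error surfaces, so a failing application is not cheaper to analyze than a passing one. A self-contained sketch of that fold, using strings for types and a bare `u64` budget; all names here are stand-ins:

```rust
type Cost = u64;

/// One visitor step: the cost of comparing the two "types" plus the outcome.
fn visit_arg(expected: &str, arg: &str) -> (Cost, Result<(), String>) {
    let cost = expected.len().max(arg.len()) as Cost;
    let result = if expected == arg {
        Ok(())
    } else {
        Err(format!("expected '{expected}', found '{arg}'"))
    };
    (cost, result)
}

fn check_args(expected: &str, args: &[&str], budget: &mut Cost) -> Result<(), String> {
    let mut costs = Vec::with_capacity(args.len());
    let mut check_result: Result<(), String> = Ok(());
    for arg in args {
        // Each argument expression would still be type-checked here; the
        // visitor is only consulted until the first admission failure.
        if check_result.is_ok() {
            let (cost, result) = visit_arg(expected, arg);
            costs.push(cost);
            check_result = result;
        }
    }
    if let Err(err) = check_result {
        // Charge the accumulated costs *before* returning the error.
        for cost in costs {
            *budget = budget
                .checked_sub(cost)
                .ok_or_else(|| "cost budget exhausted".to_string())?;
        }
        return Err(err);
    }
    Ok(())
}

fn main() {
    let mut budget: Cost = 100;
    assert!(check_args("uint", &["uint", "int"], &mut budget).is_err());
    assert!(budget < 100); // the failed check was still paid for
}
```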
{ diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index c5aefb65eda..b576277a5b4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -17,8 +17,8 @@ use stacks_common::types::StacksEpochId; use super::{ - check_argument_count, check_arguments_at_least, check_arguments_at_most, no_type, TypeChecker, - TypeResult, TypingContext, + check_argument_count, check_arguments_at_least, check_arguments_at_most, + compute_typecheck_cost, no_type, TypeChecker, TypeResult, TypingContext, }; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::costs::cost_functions::ClarityCostFunction; @@ -35,8 +35,9 @@ use crate::vm::types::signatures::{ use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, - FunctionSignature, FunctionType, PrincipalData, TupleTypeSignature, TypeSignature, Value, - BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_VALUE_SIZE, + FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, + TupleTypeSignature, TypeSignature, Value, BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -61,14 +62,43 @@ fn check_special_list_cons( args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - let typed_args = checker.type_check_all(args, context)?; - for type_arg in typed_args.iter() { - runtime_cost( - ClarityCostFunction::AnalysisListItemsCheck, - checker, - type_arg.type_size()?, - )?; + let mut result = Vec::with_capacity(args.len()); + let mut entries_size: Option = Some(0); + let mut costs = Vec::with_capacity(args.len()); + + for arg in args.iter() { + // don't use map here, since type_check has side-effects. + let checked = checker.type_check(arg, context)?; + let cost = checked.type_size().and_then(|ty_size| { + checker + .compute_cost( + ClarityCostFunction::AnalysisListItemsCheck, + &[ty_size.into()], + ) + .map_err(CheckErrors::from) + }); + costs.push(cost); + + if let Some(cur_size) = entries_size.clone() { + entries_size = cur_size.checked_add(checked.size()?); + } + if let Some(cur_size) = entries_size { + if cur_size > MAX_VALUE_SIZE { + entries_size = None; + } + } + if entries_size.is_some() { + result.push(checked); + } + } + + for cost in costs.into_iter() { + checker.add_cost(cost?)?; } + if entries_size.is_none() { + return Err(CheckErrors::ValueTooLarge.into()); + } + let typed_args = result; TypeSignature::parent_list_type(&typed_args) .map_err(|x| x.into()) .map(TypeSignature::from) @@ -202,6 +232,9 @@ pub fn check_special_tuple_cons( args.len(), )?; + let mut type_size = 0u32; + let mut cons_error = Ok(()); + handle_binding_list(args, |var_name, var_sexp| { checker.type_check(var_sexp, context).and_then(|var_type| { runtime_cost( @@ -209,11 +242,21 @@ pub fn check_special_tuple_cons( checker, var_type.type_size()?, )?; - tuple_type_data.push((var_name.clone(), var_type)); + if type_size < MAX_VALUE_SIZE { + type_size = type_size + .saturating_add(var_name.len() as u32) + .saturating_add(var_name.len() as u32) + .saturating_add(var_type.type_size()?) 
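`check_special_list_cons` above guards the running entry size with an `Option<u32>`: the total collapses to `None` once it can exceed `MAX_VALUE_SIZE`, and `ValueTooLarge` is only reported after the per-item costs are settled. The guard in isolation (the cap value mirrors `clarity::vm::types::MAX_VALUE_SIZE`):

```rust
/// Illustrative cap; the real constant lives in clarity::vm::types.
const MAX_VALUE_SIZE: u32 = 1024 * 1024;

fn total_entries_size(item_sizes: &[u32]) -> Result<u32, &'static str> {
    let mut entries_size: Option<u32> = Some(0);
    for &size in item_sizes {
        if let Some(current) = entries_size {
            // `None` means "already too large": checked_add catches u32
            // overflow, and the filter enforces the value-size cap.
            entries_size = current
                .checked_add(size)
                .filter(|total| *total <= MAX_VALUE_SIZE);
        }
    }
    entries_size.ok_or("ValueTooLarge")
}

fn main() {
    assert_eq!(total_entries_size(&[16, 32]), Ok(48));
    assert!(total_entries_size(&[MAX_VALUE_SIZE, 1]).is_err());
}
```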
+ .saturating_add(var_type.size()?); + tuple_type_data.push((var_name.clone(), var_type)); + } else { + cons_error = Err(CheckErrors::BadTupleConstruction); + } Ok(()) }) })?; + cons_error?; let tuple_signature = TupleTypeSignature::try_from(tuple_type_data) .map_err(|_e| CheckErrors::BadTupleConstruction)?; @@ -338,15 +381,33 @@ fn check_special_equals( ) -> TypeResult { check_arguments_at_least(1, args)?; - let arg_types = checker.type_check_all(args, context)?; + let mut arg_type = None; + let mut costs = Vec::with_capacity(args.len()); - let mut arg_type = arg_types[0].clone(); - for x_type in arg_types.into_iter() { - analysis_typecheck_cost(checker, &x_type, &arg_type)?; - arg_type = TypeSignature::least_supertype(&StacksEpochId::Epoch21, &x_type, &arg_type) - .map_err(|_| CheckErrors::TypeError(x_type, arg_type))?; + for arg in args.iter() { + let x_type = checker.type_check(arg, context)?; + if arg_type.is_none() { + arg_type = Some(Ok(x_type.clone())); + } + if let Some(Ok(cur_type)) = arg_type { + let cost = compute_typecheck_cost(checker, &x_type, &cur_type); + costs.push(cost); + arg_type = Some( + TypeSignature::least_supertype(&StacksEpochId::Epoch21, &x_type, &cur_type) + .map_err(|_| CheckErrors::TypeError(x_type, cur_type)), + ); + } } + for cost in costs.into_iter() { + checker.add_cost(cost?)?; + } + + // check if there was a least supertype failure. + arg_type.ok_or_else(|| { + CheckErrors::Expects("Arg type should be set because arguments checked for >= 1".into()) + })??; + Ok(TypeSignature::BoolType) } @@ -699,6 +760,48 @@ fn check_get_burn_block_info( )?) } +fn check_get_stacks_block_info( + checker: &mut TypeChecker, + args: &[SymbolicExpression], + context: &TypingContext, +) -> TypeResult { + check_argument_count(2, args)?; + + let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( + CheckErrors::GetStacksBlockInfoExpectPropertyName, + ))?; + + let block_info_prop = + StacksBlockInfoProperty::lookup_by_name(block_info_prop_str).ok_or(CheckError::new( + CheckErrors::NoSuchStacksBlockInfoProperty(block_info_prop_str.to_string()), + ))?; + + checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; + + Ok(TypeSignature::new_option(block_info_prop.type_result())?) +} + +fn check_get_tenure_info( + checker: &mut TypeChecker, + args: &[SymbolicExpression], + context: &TypingContext, +) -> TypeResult { + check_argument_count(2, args)?; + + let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( + CheckErrors::GetTenureInfoExpectPropertyName, + ))?; + + let block_info_prop = + TenureInfoProperty::lookup_by_name(block_info_prop_str).ok_or(CheckError::new( + CheckErrors::NoSuchTenureInfoProperty(block_info_prop_str.to_string()), + ))?; + + checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; + + Ok(TypeSignature::new_option(block_info_prop.type_result())?) 
+} + impl TypedNativeFunction { pub fn type_check_application( &self, @@ -1034,6 +1137,8 @@ impl TypedNativeFunction { PrincipalOf => Special(SpecialNativeFunction(&check_principal_of)), GetBlockInfo => Special(SpecialNativeFunction(&check_get_block_info)), GetBurnBlockInfo => Special(SpecialNativeFunction(&check_get_burn_block_info)), + GetStacksBlockInfo => Special(SpecialNativeFunction(&check_get_stacks_block_info)), + GetTenureInfo => Special(SpecialNativeFunction(&check_get_tenure_info)), ConsSome => Special(SpecialNativeFunction(&options::check_special_some)), ConsOkay => Special(SpecialNativeFunction(&options::check_special_okay)), ConsError => Special(SpecialNativeFunction(&options::check_special_error)), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index 090b259a26d..c1b3aabb178 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -22,7 +22,8 @@ use crate::vm::analysis::type_checker::v2_1::{ TypeResult, TypingContext, }; use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::{analysis_typecheck_cost, cost_functions, runtime_cost}; +use crate::vm::costs::{analysis_typecheck_cost, cost_functions, runtime_cost, CostTracker}; +use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; @@ -73,9 +74,15 @@ pub fn check_special_map( )?; let iter = args[1..].iter(); - let mut func_args = Vec::with_capacity(iter.len()); let mut min_args = u32::MAX; - for arg in iter { + + // use func_type visitor pattern + let mut accumulated_type = None; + let mut total_costs = vec![]; + let mut check_result = Ok(()); + let mut accumulated_types = Vec::new(); + + for (arg_ix, arg) in iter.enumerate() { let argument_type = checker.type_check(arg, context)?; let entry_type = match argument_type { TypeSignature::SequenceType(sequence) => { @@ -101,11 +108,52 @@ pub fn check_special_map( return Err(CheckErrors::ExpectedSequence(argument_type).into()); } }; - func_args.push(entry_type); + + if check_result.is_ok() { + let (costs, result) = function_type.check_args_visitor_2_1( + checker, + &entry_type, + arg_ix, + accumulated_type.as_ref(), + ); + // add the accumulated type and total cost *before* + // checking for an error: we want the subsequent error handling + // to account for this cost + accumulated_types.push(entry_type); + total_costs.extend(costs); + + match result { + Ok(Some(returned_type)) => { + accumulated_type = Some(returned_type); + } + Ok(None) => {} + Err(e) => { + check_result = Err(e); + } + }; + } } - let mapped_type = - function_type.check_args(checker, &func_args, context.epoch, context.clarity_version)?; + if let Err(mut check_error) = check_result { + if let CheckErrors::IncorrectArgumentCount(expected, _actual) = check_error.err { + check_error.err = + CheckErrors::IncorrectArgumentCount(expected, args.len().saturating_sub(1)); + check_error.diagnostic = Diagnostic::err(&check_error.err) + } + // accumulate the checking costs + for cost in total_costs.into_iter() { + checker.add_cost(cost?)?; + } + + return Err(check_error); + } + + let mapped_type = function_type.check_args( + checker, + &accumulated_types, + context.epoch, + context.clarity_version, + )?; 
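The new `check_get_stacks_block_info` and `check_get_tenure_info` checks above share one shape: the first argument must be a literal property name, the second must type-check as a `uint` height, and the result is wrapped in `(optional ...)` because the height may not resolve to a block. Reduced to strings, with an illustrative subset of property names:

```rust
/// Result type for a `get-stacks-block-info?` property; the canonical
/// list lives in StacksBlockInfoProperty, this subset is illustrative.
fn stacks_block_info_type(prop: &str) -> Option<&'static str> {
    match prop {
        "id-header-hash" | "header-hash" => Some("(buff 32)"),
        "time" => Some("uint"),
        _ => None, // NoSuchStacksBlockInfoProperty
    }
}

fn check_get_stacks_block_info(prop: &str, height_ty: &str) -> Result<String, String> {
    let inner = stacks_block_info_type(prop)
        .ok_or_else(|| format!("NoSuchStacksBlockInfoProperty(\"{prop}\")"))?;
    if height_ty != "uint" {
        return Err(format!(
            "expecting expression of type 'uint', found '{height_ty}'"
        ));
    }
    // Optional: the requested height may be above the chain tip.
    Ok(format!("(optional {inner})"))
}

fn main() {
    assert_eq!(
        check_get_stacks_block_info("time", "uint").as_deref(),
        Ok("(optional uint)")
    );
    assert!(check_get_stacks_block_info("time", "int").is_err());
}
```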
TypeSignature::list_of(mapped_type, min_args) .map_err(|_| CheckErrors::ConstructedListTooLarge.into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index c870fdbab7a..ba120575bd3 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -130,7 +130,7 @@ fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: S #[test] fn test_bad_asset_usage() { - use crate::vm::analysis::type_check; + use crate::vm::analysis::mem_type_check as mem_run_analysis; let bad_scripts = [ "(ft-get-balance stackoos tx-sender)", @@ -218,7 +218,12 @@ fn test_bad_asset_usage() { for (script, expected_err) in bad_scripts.iter().zip(expected.iter()) { let tokens_contract = format!("{}\n{}", FIRST_CLASS_TOKENS, script); - let actual_err = mem_type_check(&tokens_contract).unwrap_err(); + let actual_err = mem_run_analysis( + &tokens_contract, + ClarityVersion::Clarity2, + StacksEpochId::latest(), + ) + .unwrap_err(); println!("{}", script); assert_eq!(&actual_err.err, expected_err); } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 99942ba42c2..b87177062ce 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -433,7 +433,7 @@ fn test_names_tokens_contracts_interface() { "fungible_tokens": [], "non_fungible_tokens": [], "epoch": "Epoch21", - "clarity_version": "Clarity2" + "clarity_version": "Clarity3" }"#).unwrap(); eprintln!("{}", test_contract_json_str); @@ -1894,7 +1894,7 @@ fn clarity_trait_experiments_double_trait( // Can we define a trait with two methods with the same name and different types? 
match db.execute(|db| load_versioned(db, "double-trait", version, epoch)) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1915,7 +1915,7 @@ fn clarity_trait_experiments_impl_double_trait_both( load_versioned(db, "impl-double-trait-both", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1938,7 +1938,7 @@ fn clarity_trait_experiments_impl_double_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("BadTraitImplementation(\"double-method\", \"foo\")")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1959,7 +1959,7 @@ fn clarity_trait_experiments_impl_double_trait_2( load_versioned(db, "impl-double-trait-2", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -1983,7 +1983,7 @@ fn clarity_trait_experiments_use_double_trait( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(BoolType, UIntType)")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2007,7 +2007,7 @@ fn clarity_trait_experiments_use_partial_double_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(BoolType, UIntType)")) } - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2029,7 +2029,7 @@ fn clarity_trait_experiments_use_partial_double_trait_2( load_versioned(db, "use-partial-double-trait-2", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2047,7 +2047,7 @@ fn clarity_trait_experiments_identical_double_trait( // Can we define a trait with two methods with the same name and the same type? 
match db.execute(|db| load_versioned(db, "identical-double-trait", version, epoch)) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2068,7 +2068,7 @@ fn clarity_trait_experiments_impl_identical_double_trait( load_versioned(db, "impl-identical-double-trait", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -2126,7 +2126,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_name( load_versioned(db, "use-math-trait-transitive-name", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("TraitReferenceUnknown(\"math-alias\")")) } res => panic!("got {:?}", res), @@ -2147,7 +2147,7 @@ fn clarity_trait_experiments_use_original_and_define_a_trait( load_versioned(db, "use-original-and-define-a-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-it\")")) } @@ -2170,7 +2170,7 @@ fn clarity_trait_experiments_use_redefined_and_define_a_trait( load_versioned(db, "use-redefined-and-define-a-trait", version, epoch) }) { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-that\")")) } res => panic!("got {:?}", res), @@ -2266,7 +2266,7 @@ fn clarity_trait_experiments_call_nested_trait_1( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2292,7 +2292,7 @@ fn clarity_trait_experiments_call_nested_trait_2( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2318,7 +2318,7 @@ fn clarity_trait_experiments_call_nested_trait_3_ok( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2374,7 +2374,7 @@ fn clarity_trait_experiments_call_nested_trait_4( Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError")) } - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), res => panic!("got {:?}", res), }; } @@ -2469,7 +2469,7 @@ fn clarity_trait_experiments_call_let_rename_trait( load_versioned(db, "call-let-rename-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 
=> { assert!(err.starts_with("TraitReferenceUnknown(\"new-math-contract\")")) } @@ -2637,7 +2637,7 @@ fn clarity_trait_experiments_constant_call( load_versioned(db, "constant-call", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitReferenceUnknown(\"principal-value\")")) } @@ -2660,7 +2660,7 @@ fn clarity_trait_experiments_constant_to_trait( load_versioned(db, "constant-to-trait", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } @@ -2687,7 +2687,7 @@ fn clarity_trait_experiments_constant_to_constant_call( load_versioned(db, "constant-to-constant-call", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } @@ -2740,7 +2740,9 @@ fn clarity_trait_experiments_downcast_literal_2( }) .unwrap_err(); match version { - ClarityVersion::Clarity2 => assert!(err.starts_with("ExpectedCallableType(PrincipalType)")), + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => { + assert!(err.starts_with("ExpectedCallableType(PrincipalType)")) + } ClarityVersion::Clarity1 => { assert!(err.starts_with("TraitReferenceUnknown(\"principal-value\")")) } @@ -2874,7 +2876,7 @@ fn clarity_trait_experiments_identical_trait_cast( load_versioned(db, "identical-trait-cast", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } @@ -2900,7 +2902,7 @@ fn clarity_trait_experiments_trait_cast( load_versioned(db, "trait-cast", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } @@ -2935,7 +2937,9 @@ fn clarity_trait_experiments_trait_cast_incompatible( assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier")) } } - ClarityVersion::Clarity2 => assert!(err.starts_with("IncompatibleTrait")), + ClarityVersion::Clarity2 | ClarityVersion::Clarity3 => { + assert!(err.starts_with("IncompatibleTrait")) + } } } @@ -3208,7 +3212,7 @@ fn clarity_trait_experiments_call_full_double_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -3239,7 +3243,7 @@ fn clarity_trait_experiments_call_partial_double_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity1 => (), - Err(err) if version == ClarityVersion::Clarity2 => { + Err(err) if version >= ClarityVersion::Clarity2 => { assert!(err.starts_with("DefineTraitDuplicateMethod(\"foo\")")) } res => panic!("got {:?}", res), @@ -3290,7 +3294,7 @@ fn clarity_trait_experiments_principals_list_to_traits_list( load_versioned(db, "list-of-principals", 
version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if version == ClarityVersion::Clarity1 => { assert!(err.starts_with("TypeError(SequenceType(ListType")) } @@ -3333,7 +3337,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( load_versioned(db, "mixed-list", version, epoch) }); match result { - Ok(_) if version == ClarityVersion::Clarity2 => (), + Ok(_) if version >= ClarityVersion::Clarity2 => (), Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 85a6b39ea9c..12597c88fae 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -29,7 +29,6 @@ use crate::vm::analysis::{mem_type_check as mem_run_analysis, AnalysisDatabase}; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::{build_ast, parse}; use crate::vm::contexts::OwnedEnvironment; -use crate::vm::database::MemoryBackingStore; use crate::vm::representations::SymbolicExpression; use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::TypeSignature::OptionalType; @@ -61,11 +60,15 @@ fn type_check_helper(exp: &str) -> TypeResult { mem_type_check(exp).map(|(type_sig_opt, _)| type_sig_opt.unwrap()) } -fn type_check_helper_v1(exp: &str) -> TypeResult { - mem_run_analysis(exp, ClarityVersion::Clarity1, StacksEpochId::latest()) +fn type_check_helper_version(exp: &str, version: ClarityVersion) -> TypeResult { + mem_run_analysis(exp, version, StacksEpochId::latest()) .map(|(type_sig_opt, _)| type_sig_opt.unwrap()) } +fn type_check_helper_v1(exp: &str) -> TypeResult { + type_check_helper_version(exp, ClarityVersion::Clarity1) +} + fn buff_type(size: u32) -> TypeSignature { TypeSignature::SequenceType(BufferType(size.try_into().unwrap())) } @@ -270,18 +273,29 @@ fn test_get_block_info() { for (good_test, expected) in good.iter().zip(expected.iter()) { assert_eq!( expected, - &format!("{}", type_check_helper(good_test).unwrap()) + &format!( + "{}", + type_check_helper_version(good_test, ClarityVersion::Clarity2).unwrap() + ) ); } for (good_test_v210, expected_v210) in good_v210.iter().zip(expected_v210.iter()) { assert_eq!( expected_v210, - &format!("{}", type_check_helper(good_test_v210).unwrap()) + &format!( + "{}", + type_check_helper_version(good_test_v210, ClarityVersion::Clarity2).unwrap() + ) ); } for (bad_test, expected) in bad.iter().zip(bad_expected.iter()) { - assert_eq!(expected, &type_check_helper(bad_test).unwrap_err().err); + assert_eq!( + expected, + &type_check_helper_version(bad_test, ClarityVersion::Clarity2) + .unwrap_err() + .err + ); } for good_test in good_v210.iter() { diff --git a/clarity/src/vm/ast/definition_sorter/tests.rs b/clarity/src/vm/ast/definition_sorter/tests.rs index d0b24164ae9..2c993db2660 100644 --- a/clarity/src/vm/ast/definition_sorter/tests.rs +++ b/clarity/src/vm/ast/definition_sorter/tests.rs @@ -25,7 +25,6 @@ use crate::vm::ast::errors::{ParseErrors, ParseResult}; use crate::vm::ast::expression_identifier::ExpressionIdentifier; use crate::vm::ast::parser; use crate::vm::ast::types::{BuildASTPass, ContractAST}; -use crate::vm::database::MemoryBackingStore; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/ast/parser/v1.rs 
b/clarity/src/vm/ast/parser/v1.rs index 75c5ea2df94..5c2715e9f79 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -508,6 +508,8 @@ fn handle_expression( } } +// TODO: add tests from mutation testing results #4828 +#[cfg_attr(test, mutants::skip)] pub fn parse_lexed(input: Vec<(LexItem, u32, u32)>) -> ParseResult> { let mut parse_stack = Vec::new(); diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index a7ba4eb3c89..4c46e76a4d9 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -206,6 +206,8 @@ impl<'a> Parser<'a> { } } + // TODO: add tests from mutation testing results #4829 + #[cfg_attr(test, mutants::skip)] /// Process a new child node for an AST expression that is open and waiting for children nodes. For example, /// a list or tuple expression that is waiting for child expressions. /// @@ -275,6 +277,8 @@ impl<'a> Parser<'a> { } } + // TODO: add tests from mutation testing results #4848 + #[cfg_attr(test, mutants::skip)] fn handle_open_tuple( &mut self, open_tuple: &mut OpenTuple, diff --git a/clarity/src/vm/ast/sugar_expander/mod.rs b/clarity/src/vm/ast/sugar_expander/mod.rs index 0f28093932b..670796cf4cc 100644 --- a/clarity/src/vm/ast/sugar_expander/mod.rs +++ b/clarity/src/vm/ast/sugar_expander/mod.rs @@ -58,6 +58,8 @@ impl SugarExpander { Ok(()) } + // TODO: add tests from mutation testing results #4830 + #[cfg_attr(test, mutants::skip)] pub fn transform( &self, pre_exprs_iter: PreExpressionsDrain, diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 7ad70e4ffbc..a559ad59fd1 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -74,7 +74,7 @@ pub struct Environment<'a, 'b, 'hooks> { } pub struct OwnedEnvironment<'a, 'hooks> { - context: GlobalContext<'a, 'hooks>, + pub(crate) context: GlobalContext<'a, 'hooks>, call_stack: CallStack, } @@ -198,7 +198,7 @@ pub struct GlobalContext<'a, 'hooks> { read_only: Vec, pub cost_track: LimitedCostTracker, pub mainnet: bool, - /// This is the epoch of the the block that this transaction is executing within. + /// This is the epoch of the block that this transaction is executing within. pub epoch_id: StacksEpochId, /// This is the chain ID of the transaction pub chain_id: u32, @@ -973,7 +973,11 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let contract = self .global_context .database - .get_contract(contract_identifier)?; + .get_contract(contract_identifier) + .or_else(|e| { + self.global_context.roll_back()?; + Err(e) + })?; let result = { let mut nested_env = Environment::new( @@ -1049,7 +1053,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { result } - /// This is the epoch of the the block that this transaction is executing within. + /// This is the epoch of the block that this transaction is executing within. /// Note: in the current plans for 2.1, there is also a contract-specific **Clarity version** /// which governs which native functions are available / defined. That is separate from this /// epoch identifier, and most Clarity VM changes should consult that value instead. 
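The `or_else` wrapped around `get_contract` above closes a leak in the contract-call path: a nested context has already been opened, so a failed load must roll that frame back before the error propagates. The same pattern against a toy frame stack:

```rust
struct Store {
    frames: Vec<Vec<(String, String)>>,
}

impl Store {
    fn begin(&mut self) {
        self.frames.push(Vec::new());
    }
    fn roll_back(&mut self) -> Result<(), String> {
        self.frames
            .pop()
            .map(|_| ())
            .ok_or_else(|| "rolled back with no open frame".to_string())
    }
    fn load_contract(&self, id: &str) -> Result<String, String> {
        Err(format!("NoSuchContract(\"{id}\")")) // this toy store is empty
    }
}

fn execute_contract(store: &mut Store, id: &str) -> Result<String, String> {
    store.begin();
    // On a failed load, pop the frame we just opened instead of leaving a
    // dangling nesting level behind, then propagate the original error.
    let source = store.load_contract(id).or_else(|e| {
        store.roll_back()?;
        Err(e)
    })?;
    Ok(source)
}

fn main() {
    let mut store = Store { frames: Vec::new() };
    assert!(execute_contract(&mut store, "SP000.does-not-exist").is_err());
    assert!(store.frames.is_empty()); // no leaked frame
}
```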
This diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 744b6056911..0751822ed01 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -19,7 +19,6 @@ use std::{cmp, fmt}; use hashbrown::HashMap; use lazy_static::lazy_static; -use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; use stacks_common::types::StacksEpochId; @@ -783,12 +782,11 @@ impl LimitedCostTracker { } impl TrackerData { + // TODO: add tests from mutation testing results #4831 + #[cfg_attr(test, mutants::skip)] /// `apply_updates` - tells this function to look for any changes in the cost voting contract /// which would need to be applied. if `false`, just load the last computed cost state in this /// fork. - /// TODO: #4587 add test for Err cases - /// Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> { clarity_db.begin(); let epoch_id = clarity_db @@ -958,6 +956,8 @@ fn parse_cost( } } +// TODO: add tests from mutation testing results #4832 +#[cfg_attr(test, mutants::skip)] fn compute_cost( cost_tracker: &mut TrackerData, cost_function_reference: ClarityCostFunctionReference, @@ -1166,23 +1166,6 @@ impl fmt::Display for ExecutionCost { } } -impl ToSql for ExecutionCost { - fn to_sql(&self) -> rusqlite::Result { - let val = serde_json::to_string(self) - .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?; - Ok(ToSqlOutput::from(val)) - } -} - -impl FromSql for ExecutionCost { - fn column_result(value: ValueRef) -> FromSqlResult { - let str_val = String::column_result(value)?; - let parsed = serde_json::from_str(&str_val) - .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?; - Ok(parsed) - } -} - pub trait CostOverflowingMath { fn cost_overflow_mul(self, other: T) -> Result; fn cost_overflow_add(self, other: T) -> Result; diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index b395f88c6db..cdf411fc3ed 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -25,9 +25,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; -use stacks_common::types::{ - Address, StacksEpoch as GenericStacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, -}; +use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use super::clarity_store::SpecialCaseHandler; @@ -54,6 +52,7 @@ use crate::vm::types::{ }; pub const STORE_CONTRACT_SRC_INTERFACE: bool = true; +const TENURE_HEIGHT_KEY: &str = "_stx-data::tenure_height"; pub type StacksEpoch = GenericStacksEpoch; @@ -87,20 +86,55 @@ pub trait HeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option; fn get_burn_header_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> 
Option; + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: Option<&StacksEpochId>, + ) -> Option; fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; } pub trait BurnStateDB { + /// Get the burn chain height at the current tip. + fn get_tip_burn_block_height(&self) -> Option; + /// Get the sortition id for the current tip. + fn get_tip_sortition_id(&self) -> Option; + fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; fn get_v3_unlock_height(&self) -> u32; @@ -149,118 +183,6 @@ pub trait BurnStateDB { ) -> Option<(Vec, u128)>; } -impl HeadersDB for &dyn HeadersDB { - fn get_stacks_block_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - (*self).get_stacks_block_header_hash_for_block(id_bhh) - } - fn get_burn_header_hash_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_header_hash_for_block(bhh) - } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_consensus_hash_for_block(id_bhh) - } - fn get_vrf_seed_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_vrf_seed_for_block(bhh) - } - fn get_burn_block_time_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_time_for_block(bhh) - } - fn get_burn_block_height_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_height_for_block(bhh) - } - fn get_miner_address(&self, bhh: &StacksBlockId) -> Option { - (*self).get_miner_address(bhh) - } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_block(id_bhh) - } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_winning_block(id_bhh) - } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_tokens_earned_for_block(id_bhh) - } -} - -impl BurnStateDB for &dyn BurnStateDB { - fn get_v1_unlock_height(&self) -> u32 { - (*self).get_v1_unlock_height() - } - - fn get_v2_unlock_height(&self) -> u32 { - (*self).get_v2_unlock_height() - } - - fn get_v3_unlock_height(&self) -> u32 { - (*self).get_v3_unlock_height() - } - - fn get_pox_3_activation_height(&self) -> u32 { - (*self).get_pox_3_activation_height() - } - - fn get_pox_4_activation_height(&self) -> u32 { - (*self).get_pox_4_activation_height() - } - - fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { - (*self).get_burn_block_height(sortition_id) - } - - fn get_sortition_id_from_consensus_hash( - &self, - consensus_hash: &ConsensusHash, - ) -> Option { - 
(*self).get_sortition_id_from_consensus_hash(consensus_hash) - } - - fn get_burn_start_height(&self) -> u32 { - (*self).get_burn_start_height() - } - - fn get_burn_header_hash( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option { - (*self).get_burn_header_hash(height, sortition_id) - } - - fn get_stacks_epoch(&self, height: u32) -> Option { - (*self).get_stacks_epoch(height) - } - - fn get_pox_prepare_length(&self) -> u32 { - (*self).get_pox_prepare_length() - } - - fn get_pox_reward_cycle_length(&self) -> u32 { - (*self).get_pox_reward_cycle_length() - } - - fn get_pox_rejection_fraction(&self) -> u64 { - (*self).get_pox_rejection_fraction() - } - fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { - (*self).get_stacks_epoch_by_epoch_id(epoch_id) - } - - fn get_ast_rules(&self, height: u32) -> ASTRules { - (*self).get_ast_rules(height) - } - - fn get_pox_payout_addrs( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option<(Vec, u128)> { - (*self).get_pox_payout_addrs(height, sortition_id) - } -} - pub struct NullHeadersDB {} pub struct NullBurnStateDB { epoch: StacksEpochId, @@ -286,12 +208,17 @@ impl HeadersDB for NullHeadersDB { None } } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -300,10 +227,18 @@ impl HeadersDB for NullHeadersDB { None } } - fn get_consensus_hash_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) @@ -311,6 +246,9 @@ impl HeadersDB for NullHeadersDB { None } } + fn get_stacks_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + None + } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -319,25 +257,46 @@ impl HeadersDB for NullHeadersDB { Some(1) } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burnchain_tokens_spent_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_burnchain_tokens_spent_for_winning_block( &self, _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { None } - fn get_tokens_earned_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } } #[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, 
_sortition_id: &SortitionId) -> Option { None } @@ -367,7 +326,7 @@ impl BurnStateDB for NullBurnStateDB { start_height: 0, end_height: u64::MAX, block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, + network_epoch: 0, }) } fn get_stacks_epoch_by_epoch_id(&self, _epoch_id: &StacksEpochId) -> Option { @@ -855,6 +814,38 @@ impl<'a> ClarityDatabase<'a> { Ok(()) } + /// Returns the tenure height of the current block. + pub fn get_tenure_height(&mut self) -> Result { + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + // Before epoch 3.0, the tenure height was not stored in the + // Clarity state. Instead, it was the same as the block height. + return Ok(self.get_current_block_height()); + } + + self.get_data(TENURE_HEIGHT_KEY)? + .ok_or_else(|| { + InterpreterError::Expect("No tenure height in stored Clarity state".into()).into() + }) + .and_then(|x| { + u32::try_into(x).map_err(|_| { + InterpreterError::Expect("Bad tenure height in stored Clarity state".into()) + .into() + }) + }) + } + + /// Set the tenure height of the current block. In the first block of a new + /// tenure, this height must be incremented before evaluating any + /// transactions in the block. + pub fn set_tenure_height(&mut self, height: u32) -> Result<()> { + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + return Err(Error::Interpreter(InterpreterError::Expect( + "Setting tenure height in Clarity state is not supported before epoch 3.0".into(), + ))); + } + self.put_data(TENURE_HEIGHT_KEY, &height) + } + pub fn destroy(self) -> RollbackWrapper<'a> { self.store } @@ -931,39 +922,69 @@ impl<'a> ClarityDatabase<'a> { /// `get_current_block_height`). pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); - let last_mined_bhh = if cur_stacks_height == 0 { - return Ok(self.burn_state_db.get_burn_start_height()); - } else { - self.get_index_block_header_hash(cur_stacks_height.checked_sub(1).ok_or_else( - || { - InterpreterError::Expect( - "BUG: cannot eval burn-block-height in boot code".into(), - ) - }, - )?)? - }; - self.get_burnchain_block_height(&last_mined_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "Block header hash '{}' must return for provided stacks block height {}", - &last_mined_bhh, cur_stacks_height - )) - .into() - }) + // Before epoch 3.0, we can only access the burn block associated with the last block + if !self + .get_clarity_epoch_version()? + .clarity_uses_tip_burn_block() + { + if cur_stacks_height == 0 { + return Ok(self.burn_state_db.get_burn_start_height()); + }; + // Safety note: normal subtraction is safe here, because we've already checked + // that cur_stacks_height > 0. 
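`get_tenure_height` and `set_tenure_height` above store the tenure height directly in the Clarity datastore under `_stx-data::tenure_height`, with a fallback for pre-3.0 forks where every block started a tenure. A reduced model of that epoch gate, with stand-in epoch and store types:

```rust
use std::collections::HashMap;

const TENURE_HEIGHT_KEY: &str = "_stx-data::tenure_height";

struct Db {
    epoch: u32, // stand-in for StacksEpochId (30 = epoch 3.0)
    block_height: u32,
    data: HashMap<String, u32>,
}

impl Db {
    fn get_tenure_height(&self) -> Result<u32, String> {
        if self.epoch < 30 {
            // Before epoch 3.0 every block started a new tenure, so the
            // tenure height and the block height coincide.
            return Ok(self.block_height);
        }
        self.data
            .get(TENURE_HEIGHT_KEY)
            .copied()
            .ok_or_else(|| "No tenure height in stored Clarity state".to_string())
    }

    fn set_tenure_height(&mut self, height: u32) -> Result<(), String> {
        if self.epoch < 30 {
            return Err("not supported before epoch 3.0".to_string());
        }
        self.data.insert(TENURE_HEIGHT_KEY.to_string(), height);
        Ok(())
    }
}

fn main() {
    let mut db = Db { epoch: 30, block_height: 7, data: HashMap::new() };
    assert!(db.get_tenure_height().is_err()); // must be written first in 3.0+
    db.set_tenure_height(5).unwrap();
    assert_eq!(db.get_tenure_height(), Ok(5));
}
```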
+ let last_mined_bhh = self.get_index_block_header_hash(cur_stacks_height - 1)?; + + self.get_burnchain_block_height(&last_mined_bhh) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "Block header hash '{}' must return for provided stacks block height {}", + &last_mined_bhh, cur_stacks_height + )) + .into() + }) + } else { + // In epoch 3+, we can access the current burnchain block + self.burn_state_db + .get_tip_burn_block_height() + .ok_or_else(|| { + InterpreterError::Expect("Failed to get burnchain tip height.".into()).into() + }) + } } pub fn get_block_header_hash(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; self.headers_db - .get_stacks_block_header_hash_for_block(&id_bhh) + .get_stacks_block_header_hash_for_block(&id_bhh, &epoch) + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) + } + + pub fn get_burn_block_time( + &mut self, + block_height: u32, + id_bhh_opt: Option, + ) -> Result { + let id_bhh = match id_bhh_opt { + Some(x) => x, + None => self.get_index_block_header_hash(block_height)?, + }; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; + self.headers_db + .get_burn_block_time_for_block(&id_bhh, Some(&epoch)) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } pub fn get_block_time(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; + if !epoch.uses_nakamoto_blocks() { + return self.get_burn_block_time(block_height, Some(id_bhh)); + } + self.headers_db - .get_burn_block_time_for_block(&id_bhh) + .get_stacks_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } @@ -977,46 +998,57 @@ impl<'a> ClarityDatabase<'a> { .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } + /// In Epoch 2.x: /// 1. Get the current Stacks tip height (which is in the process of being evaluated) /// 2. Get the parent block's StacksBlockId, which is SHA512-256(consensus_hash, block_hash). /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. Resolve the consensus hash to the associated SortitionId + /// In Epoch 3+: + /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { - let current_stacks_height = self.get_current_block_height(); + if !self + .get_clarity_epoch_version()? 
+ .clarity_uses_tip_burn_block() + { + let current_stacks_height = self.get_current_block_height(); - if current_stacks_height < 1 { - // we are in the Stacks genesis block - return Ok(None); - } + if current_stacks_height < 1 { + // we are in the Stacks genesis block + return Ok(None); + } - // this is the StacksBlockId of the last block evaluated in this fork - let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; + // this is the StacksBlockId of the last block evaluated in this fork + let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; + let epoch = self.get_stacks_epoch_for_block(&parent_id_bhh)?; - // infallible, since we always store the consensus hash with the StacksBlockId in the - // headers DB - let consensus_hash = self - .headers_db - .get_consensus_hash_for_block(&parent_id_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no consensus hash found for StacksBlockId {}", - &parent_id_bhh - )) - })?; + // infallible, since we always store the consensus hash with the StacksBlockId in the + // headers DB + let consensus_hash = self + .headers_db + .get_consensus_hash_for_block(&parent_id_bhh, &epoch) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no consensus hash found for StacksBlockId {}", + &parent_id_bhh + )) + })?; - // infallible, since every sortition has a consensus hash - let sortition_id = self - .burn_state_db - .get_sortition_id_from_consensus_hash(&consensus_hash) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no SortitionID found for consensus hash {}", - &consensus_hash - )) - })?; + // infallible, since every sortition has a consensus hash + let sortition_id = self + .burn_state_db + .get_sortition_id_from_consensus_hash(&consensus_hash) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no SortitionID found for consensus hash {}", + &consensus_hash + )) + })?; - Ok(Some(sortition_id)) + Ok(Some(sortition_id)) + } else { + Ok(self.burn_state_db.get_tip_sortition_id()) + } } /// Fetch the burnchain block header hash for a given burnchain height. @@ -1054,22 +1086,24 @@ impl<'a> ClarityDatabase<'a> { .get_pox_payout_addrs(burnchain_block_height, &sortition_id)) } - pub fn get_burnchain_block_height(&mut self, id_bhh: &StacksBlockId) -> Option { + pub fn get_burnchain_block_height(&self, id_bhh: &StacksBlockId) -> Option { self.headers_db.get_burn_block_height_for_block(id_bhh) } pub fn get_block_vrf_seed(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; self.headers_db - .get_vrf_seed_for_block(&id_bhh) + .get_vrf_seed_for_block(&id_bhh, &epoch) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } pub fn get_miner_address(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_miner_address(&id_bhh) + .get_miner_address(&id_bhh, &epoch) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))? 
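Each header accessor above now follows the same two-step shape: resolve the block's epoch from its burnchain height (`get_stacks_epoch_for_block`), then pass that epoch to the epoch-aware `HeadersDB` method so the backend can pick the right header table. A sketch with stand-in types and a toy epoch schedule:

```rust
#[derive(Clone, Copy)]
enum Epoch {
    Epoch2x,
    Epoch30,
}

trait Headers {
    fn burn_height(&self, block: u32) -> Option<u32>;
    fn miner(&self, block: u32, epoch: Epoch) -> Option<String>;
}

fn epoch_for_burn_height(height: u32) -> Epoch {
    // Toy schedule; the real lookup goes through BurnStateDB::get_stacks_epoch.
    if height >= 1_000 { Epoch::Epoch30 } else { Epoch::Epoch2x }
}

fn get_miner_address(db: &impl Headers, block: u32) -> Result<String, String> {
    let burn_height = db.burn_height(block).ok_or("no burnchain height for block")?;
    let epoch = epoch_for_burn_height(burn_height);
    // The epoch tells the backend which header table (Nakamoto or
    // pre-Nakamoto) holds this block's row.
    db.miner(block, epoch)
        .ok_or_else(|| "Failed to get block data.".to_string())
}

struct StubHeaders;

impl Headers for StubHeaders {
    fn burn_height(&self, block: u32) -> Option<u32> {
        Some(block + 2_000) // everything is past the toy boundary
    }
    fn miner(&self, block: u32, _epoch: Epoch) -> Option<String> {
        Some(format!("miner-of-{block}"))
    }
}

fn main() {
    assert_eq!(get_miner_address(&StubHeaders, 1), Ok("miner-of-1".to_string()));
}
```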
.into()) } @@ -1080,9 +1114,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_burnchain_tokens_spent_for_winning_block(&id_bhh) + .get_burnchain_tokens_spent_for_winning_block(&id_bhh, &epoch) .ok_or_else(|| { InterpreterError::Expect( "FATAL: no winning burnchain token spend record for block".into(), @@ -1097,9 +1132,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_burnchain_tokens_spent_for_block(&id_bhh) + .get_burnchain_tokens_spent_for_block(&id_bhh, &epoch) .ok_or_else(|| { InterpreterError::Expect( "FATAL: no total burnchain token spend record for block".into(), @@ -1122,9 +1158,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; let reward: u128 = self .headers_db - .get_tokens_earned_for_block(&id_bhh) + .get_tokens_earned_for_block(&id_bhh, &epoch) .map(|x| x.into()) .ok_or_else(|| { InterpreterError::Expect("FATAL: matured block has no recorded reward".into()) @@ -2135,4 +2172,17 @@ impl<'a> ClarityDatabase<'a> { pub fn get_stacks_epoch(&self, height: u32) -> Option { self.burn_state_db.get_stacks_epoch(height) } + + pub fn get_stacks_epoch_for_block(&self, id_bhh: &StacksBlockId) -> Result { + let burn_block = self.get_burnchain_block_height(&id_bhh).ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no burnchain block height found for Stacks block {}", + id_bhh + )) + })?; + let epoch = self + .get_stacks_epoch(burn_block) + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))?; + Ok(epoch.epoch_id) + } } diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index afe2c550ba6..b6a45ee7643 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -16,15 +16,18 @@ use std::path::PathBuf; +#[cfg(feature = "canonical")] use rusqlite::Connection; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; +#[cfg(feature = "canonical")] +use crate::vm::database::SqliteConnection; use crate::vm::database::{ BurnStateDB, ClarityDatabase, ClarityDeserializable, ClaritySerializable, HeadersDB, - SqliteConnection, NULL_BURN_STATE_DB, NULL_HEADER_DB, + NULL_BURN_STATE_DB, NULL_HEADER_DB, }; use crate::vm::errors::{ CheckErrors, IncomparableError, InterpreterError, InterpreterResult as Result, @@ -83,6 +86,8 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; + + #[cfg(feature = "canonical")] fn get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -106,59 +111,27 @@ pub trait ClarityBackingStore { fn get_contract_hash( &mut self, contract: &QualifiedContractIdentifier, - ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { - let key = make_contract_hash_key(contract); - let contract_commitment = self - .get_data(&key)? 
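The `clarity_store.rs` changes above convert default trait methods, which hard-wired `SqliteConnection`, into required methods, letting the rusqlite dependency sit behind the `canonical` feature. The gating shape in miniature (compiles without the feature; with it, a `rusqlite` dependency is assumed):

```rust
#[cfg(feature = "canonical")]
use rusqlite::Connection;

pub trait BackingStore {
    // Formerly a default method body that reached for SqliteConnection;
    // each backend now has to supply its own metadata storage.
    fn get_metadata(&mut self, contract: &str, key: &str) -> Option<String>;

    // The raw side-store handle only exists in sqlite-backed builds.
    #[cfg(feature = "canonical")]
    fn get_side_store(&mut self) -> &Connection;
}
```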
-            .map(|x| ContractCommitment::deserialize(&x))
-            .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?;
-        let ContractCommitment {
-            block_height,
-            hash: contract_hash,
-        } = contract_commitment?;
-        let bhh = self.get_block_at_height(block_height)
-            .ok_or_else(|| InterpreterError::Expect("Should always be able to map from height to block hash when looking up contract information.".into()))?;
-        Ok((bhh, contract_hash))
-    }
+    ) -> Result<(StacksBlockId, Sha512Trunc256Sum)>;
 
     fn insert_metadata(
         &mut self,
         contract: &QualifiedContractIdentifier,
         key: &str,
         value: &str,
-    ) -> Result<()> {
-        let bhh = self.get_open_chain_tip();
-        SqliteConnection::insert_metadata(
-            self.get_side_store(),
-            &bhh,
-            &contract.to_string(),
-            key,
-            value,
-        )
-    }
+    ) -> Result<()>;
 
     fn get_metadata(
         &mut self,
         contract: &QualifiedContractIdentifier,
         key: &str,
-    ) -> Result<Option<String>> {
-        let (bhh, _) = self.get_contract_hash(contract)?;
-        SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key)
-    }
+    ) -> Result<Option<String>>;
 
     fn get_metadata_manual(
         &mut self,
         at_height: u32,
         contract: &QualifiedContractIdentifier,
         key: &str,
-    ) -> Result<Option<String>> {
-        let bhh = self.get_block_at_height(at_height)
-            .ok_or_else(|| {
-                warn!("Unknown block height when manually querying metadata"; "block_height" => at_height);
-                RuntimeErrorType::BadBlockHeight(at_height.to_string())
-            })?;
-        SqliteConnection::get_metadata(self.get_side_store(), &bhh, &contract.to_string(), key)
-    }
+    ) -> Result<Option<String>>;
 
     fn put_all_metadata(
         &mut self,
@@ -240,6 +213,7 @@ impl ClarityBackingStore for NullBackingStore {
         panic!("NullBackingStore can't retrieve data")
     }
 
+    #[cfg(feature = "canonical")]
     fn get_side_store(&mut self) -> &Connection {
         panic!("NullBackingStore has no side store")
     }
@@ -263,84 +237,37 @@ impl ClarityBackingStore for NullBackingStore {
     fn put_all_data(&mut self, mut _items: Vec<(String, String)>) -> Result<()> {
         panic!("NullBackingStore cannot put")
     }
-}
-
-pub struct MemoryBackingStore {
-    side_store: Connection,
-}
 
-impl Default for MemoryBackingStore {
-    fn default() -> Self {
-        MemoryBackingStore::new()
-    }
-}
-
-impl MemoryBackingStore {
-    #[allow(clippy::unwrap_used)]
-    pub fn new() -> MemoryBackingStore {
-        let side_store = SqliteConnection::memory().unwrap();
-
-        let mut memory_marf = MemoryBackingStore { side_store };
-
-        memory_marf.as_clarity_db().initialize();
-
-        memory_marf
-    }
-
-    pub fn as_clarity_db(&mut self) -> ClarityDatabase {
-        ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB)
-    }
-
-    pub fn as_analysis_db(&mut self) -> AnalysisDatabase {
-        AnalysisDatabase::new(self)
-    }
-}
-
-impl ClarityBackingStore for MemoryBackingStore {
-    fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult<StacksBlockId> {
-        Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into())
-    }
-
-    fn get_data(&mut self, key: &str) -> Result<Option<String>> {
-        SqliteConnection::get(self.get_side_store(), key)
-    }
-
-    fn get_data_with_proof(&mut self, key: &str) -> Result<Option<(String, Vec<u8>)>> {
-        Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![])))
-    }
-
-    fn get_side_store(&mut self) -> &Connection {
-        &self.side_store
-    }
-
-    fn get_block_at_height(&mut self, height: u32) -> Option<StacksBlockId> {
-        if height == 0 {
-            Some(StacksBlockId([255; 32]))
-        } else {
-            None
-        }
-    }
-
-    fn get_open_chain_tip(&mut self) -> StacksBlockId {
-        StacksBlockId([255; 32])
-    }
-
-    fn get_open_chain_tip_height(&mut self) -> u32 {
-        0
+    fn get_contract_hash(
+        &mut self,
+        _contract: &QualifiedContractIdentifier,
+    ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> {
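+        // NullBackingStore deliberately panics: it backs contexts that must
+        // never read or write contract state.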
+        panic!("NullBackingStore cannot get_contract_hash")
     }
 
-    fn get_current_block_height(&mut self) -> u32 {
-        1
+    fn insert_metadata(
+        &mut self,
+        _contract: &QualifiedContractIdentifier,
+        _key: &str,
+        _value: &str,
+    ) -> Result<()> {
+        panic!("NullBackingStore cannot insert_metadata")
     }
 
-    fn get_cc_special_cases_handler(&self) -> Option<SpecialCaseHandler> {
-        None
+    fn get_metadata(
+        &mut self,
+        _contract: &QualifiedContractIdentifier,
+        _key: &str,
+    ) -> Result<Option<String>> {
+        panic!("NullBackingStore cannot get_metadata")
     }
 
-    fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()> {
-        for (key, value) in items.into_iter() {
-            SqliteConnection::put(self.get_side_store(), &key, &value)?;
-        }
-        Ok(())
+    fn get_metadata_manual(
+        &mut self,
+        _at_height: u32,
+        _contract: &QualifiedContractIdentifier,
+        _key: &str,
+    ) -> Result<Option<String>> {
+        panic!("NullBackingStore cannot get_metadata_manual")
     }
 }
diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs
index 69eb74b39ed..3fd845f92ff 100644
--- a/clarity/src/vm/database/key_value_wrapper.rs
+++ b/clarity/src/vm/database/key_value_wrapper.rs
@@ -31,15 +31,15 @@ use crate::vm::types::{
 };
 use crate::vm::{StacksEpoch, Value};
 
-#[cfg(rollback_value_check)]
+#[cfg(feature = "rollback_value_check")]
 type RollbackValueCheck = String;
-#[cfg(not(rollback_value_check))]
+#[cfg(not(feature = "rollback_value_check"))]
 type RollbackValueCheck = ();
 
-#[cfg(not(rollback_value_check))]
+#[cfg(not(feature = "rollback_value_check"))]
 fn rollback_value_check(_value: &str, _check: &RollbackValueCheck) {}
-#[cfg(not(rollback_value_check))]
+#[cfg(not(feature = "rollback_value_check"))]
 fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _value: &str) {
     edits.push((key, ()));
 }
@@ -47,7 +47,7 @@ fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _val
 // wrapper -- i.e., when committing to the underlying store. for the _unchecked_ implementation
 // this is used to get the edit _value_ out of the lookupmap, for used in the subsequent `put_all`
 // command.
-#[cfg(not(rollback_value_check))]
+#[cfg(not(feature = "rollback_value_check"))]
 fn rollback_check_pre_bottom_commit<T>(
     edits: Vec<(T, RollbackValueCheck)>,
     lookup_map: &mut HashMap<T, Vec<String>>,
@@ -71,11 +71,11 @@ where
     output
 }
 
-#[cfg(rollback_value_check)]
+#[cfg(feature = "rollback_value_check")]
 fn rollback_value_check(value: &String, check: &RollbackValueCheck) {
     assert_eq!(value, check)
 }
-#[cfg(rollback_value_check)]
+#[cfg(feature = "rollback_value_check")]
 fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String)
 where
     T: Eq + Hash + Clone,
{
@@ -84,7 +84,7 @@ where
 }
 // this function is used to check the lookup map when committing at the "bottom" of the
 // wrapper -- i.e., when committing to the underlying store.
-#[cfg(rollback_value_check)]
+#[cfg(feature = "rollback_value_check")]
 fn rollback_check_pre_bottom_commit<T>(
     edits: Vec<(T, RollbackValueCheck)>,
     lookup_map: &mut HashMap<T, Vec<String>>,
diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs
index 1092992982a..d16d944d557 100644
--- a/clarity/src/vm/database/mod.rs
+++ b/clarity/src/vm/database/mod.rs
@@ -15,13 +15,16 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
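
The key_value_wrapper.rs hunks above promote `rollback_value_check` from a bare `--cfg` flag (which had to be passed through RUSTFLAGS) to a Cargo feature that can be declared in Cargo.toml and toggled with `--features`. A minimal, self-contained sketch of the same gating pattern, with hypothetical names, assuming a crate whose Cargo.toml declares `rollback_value_check = []` under `[features]`:

// Carries the original value only when the feature is on.
#[cfg(feature = "rollback_value_check")]
type Check = String;
// Zero-sized stand-in when the feature is off: the check costs nothing.
#[cfg(not(feature = "rollback_value_check"))]
type Check = ();

#[cfg(feature = "rollback_value_check")]
fn make_check(value: &str) -> Check {
    value.to_string()
}

#[cfg(not(feature = "rollback_value_check"))]
fn make_check(_value: &str) -> Check {}

fn main() {
    // Compiles with or without `--features rollback_value_check`;
    // only what `Check` carries changes.
    let _check: Check = make_check("edit-value");
}
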
 use hashbrown::HashMap;
+#[cfg(feature = "canonical")]
+pub use sqlite::MemoryBackingStore;
 
 pub use self::clarity_db::{
     BurnStateDB, ClarityDatabase, HeadersDB, StoreType, NULL_BURN_STATE_DB, NULL_HEADER_DB,
     STORE_CONTRACT_SRC_INTERFACE,
 };
-pub use self::clarity_store::{ClarityBackingStore, MemoryBackingStore, SpecialCaseHandler};
+pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler};
 pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog};
+#[cfg(feature = "canonical")]
 pub use self::sqlite::SqliteConnection;
 pub use self::structures::{
     ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata,
@@ -31,5 +34,6 @@ pub use self::structures::{
 pub mod clarity_db;
 pub mod clarity_store;
 mod key_value_wrapper;
-mod sqlite;
+#[cfg(feature = "canonical")]
+pub mod sqlite;
 mod structures;
diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs
index 6b2d64afa5f..dc3ad4f5bde 100644
--- a/clarity/src/vm/database/sqlite.rs
+++ b/clarity/src/vm/database/sqlite.rs
@@ -14,18 +14,28 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-use rusqlite::types::{FromSql, ToSql};
+use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
 use rusqlite::{
-    Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row,
-    Savepoint, NO_PARAMS,
+    params, Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row,
+    Savepoint,
 };
-use stacks_common::types::chainstate::StacksBlockId;
+use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId};
+use stacks_common::types::sqlite::NO_PARAMS;
 use stacks_common::util::db_common::tx_busy_handler;
+use stacks_common::util::hash::Sha512Trunc256Sum;
 
+use super::clarity_store::{make_contract_hash_key, ContractCommitment};
+use super::{
+    ClarityBackingStore, ClarityDatabase, ClarityDeserializable, SpecialCaseHandler,
+    NULL_BURN_STATE_DB, NULL_HEADER_DB,
+};
+use crate::vm::analysis::{AnalysisDatabase, CheckErrors};
 use crate::vm::contracts::Contract;
+use crate::vm::costs::ExecutionCost;
 use crate::vm::errors::{
     Error, IncomparableError, InterpreterError, InterpreterResult as Result, RuntimeErrorType,
 };
+use crate::vm::types::QualifiedContractIdentifier;
 
 const SQL_FAIL_MESSAGE: &str = "PANIC: SQL Failure in Smart Contract VM.";
 
@@ -34,11 +44,8 @@ pub struct SqliteConnection {
 }
 
 fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> {
-    let params: [&dyn ToSql; 2] = [&key, &value];
-    match conn.execute(
-        "REPLACE INTO data_table (key, value) VALUES (?, ?)",
-        &params,
-    ) {
+    let params = params![key, value];
+    match conn.execute("REPLACE INTO data_table (key, value) VALUES (?, ?)", params) {
         Ok(_) => Ok(()),
         Err(e) => {
             error!("Failed to insert/replace ({},{}): {:?}", key, value, &e);
@@ -49,11 +56,11 @@ fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> {
 
 fn sqlite_get(conn: &Connection, key: &str) -> Result<Option<String>> {
     trace!("sqlite_get {}", key);
-    let params: [&dyn ToSql; 1] = [&key];
+    let params = params![key];
     let res = match conn
         .query_row(
             "SELECT value FROM data_table WHERE key = ?",
-            &params,
+            params,
             |row| row.get(0),
         )
         .optional()
@@ -73,6 +80,62 @@ fn sqlite_has_entry(conn: &Connection, key: &str) -> Result<bool> {
     Ok(sqlite_get(conn, key)?.is_some())
 }
 
+pub fn sqlite_get_contract_hash(
+    store: &mut dyn ClarityBackingStore,
+    contract: &QualifiedContractIdentifier,
+) -> Result<(StacksBlockId, Sha512Trunc256Sum)> {
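+    // Shared free function: SQLite-backed stores (e.g. MemoryBackingStore
+    // below) implement the ClarityBackingStore metadata methods by delegating
+    // to these sqlite_* helpers.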
+    let key = make_contract_hash_key(contract);
+    let contract_commitment = store
+        .get_data(&key)?
+        .map(|x| ContractCommitment::deserialize(&x))
+        .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?;
+    let ContractCommitment {
+        block_height,
+        hash: contract_hash,
+    } = contract_commitment?;
+    let bhh = store.get_block_at_height(block_height)
+        .ok_or_else(|| InterpreterError::Expect("Should always be able to map from height to block hash when looking up contract information.".into()))?;
+    Ok((bhh, contract_hash))
+}
+
+pub fn sqlite_insert_metadata(
+    store: &mut dyn ClarityBackingStore,
+    contract: &QualifiedContractIdentifier,
+    key: &str,
+    value: &str,
+) -> Result<()> {
+    let bhh = store.get_open_chain_tip();
+    SqliteConnection::insert_metadata(
+        store.get_side_store(),
+        &bhh,
+        &contract.to_string(),
+        key,
+        value,
+    )
+}
+
+pub fn sqlite_get_metadata(
+    store: &mut dyn ClarityBackingStore,
+    contract: &QualifiedContractIdentifier,
+    key: &str,
+) -> Result<Option<String>> {
+    let (bhh, _) = store.get_contract_hash(contract)?;
+    SqliteConnection::get_metadata(store.get_side_store(), &bhh, &contract.to_string(), key)
+}
+
+pub fn sqlite_get_metadata_manual(
+    store: &mut dyn ClarityBackingStore,
+    at_height: u32,
+    contract: &QualifiedContractIdentifier,
+    key: &str,
+) -> Result<Option<String>> {
+    let bhh = store.get_block_at_height(at_height).ok_or_else(|| {
+        warn!("Unknown block height when manually querying metadata"; "block_height" => at_height);
+        RuntimeErrorType::BadBlockHeight(at_height.to_string())
+    })?;
+    SqliteConnection::get_metadata(store.get_side_store(), &bhh, &contract.to_string(), key)
+}
+
 impl SqliteConnection {
     pub fn put(conn: &Connection, key: &str, value: &str) -> Result<()> {
         sqlite_put(conn, key, value)
@@ -90,11 +153,11 @@ impl SqliteConnection {
         value: &str,
     ) -> Result<()> {
         let key = format!("clr-meta::{}::{}", contract_hash, key);
-        let params: [&dyn ToSql; 3] = [&bhh, &key, &value];
+        let params = params![bhh, key, value];
 
         if let Err(e) = conn.execute(
             "INSERT INTO metadata_table (blockhash, key, value) VALUES (?, ?, ?)",
-            &params,
+            params,
         ) {
             error!(
                 "Failed to insert ({},{},{}): {:?}",
@@ -113,10 +176,10 @@ impl SqliteConnection {
         from: &StacksBlockId,
         to: &StacksBlockId,
     ) -> Result<()> {
-        let params = [to, from];
+        let params = params![to, from];
         if let Err(e) = conn.execute(
             "UPDATE metadata_table SET blockhash = ? WHERE blockhash = ?",
-            &params,
+            params,
         ) {
             error!("Failed to update {} to {}: {:?}", &from, &to, &e);
             return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into());
@@ -125,7 +188,10 @@ impl SqliteConnection {
     }
 
     pub fn drop_metadata(conn: &Connection, from: &StacksBlockId) -> Result<()> {
-        if let Err(e) = conn.execute("DELETE FROM metadata_table WHERE blockhash = ?", &[from]) {
+        if let Err(e) = conn.execute(
+            "DELETE FROM metadata_table WHERE blockhash = ?",
+            params![from],
+        ) {
             error!("Failed to drop metadata from {}: {:?}", &from, &e);
             return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into());
         }
@@ -139,12 +205,12 @@ impl SqliteConnection {
         key: &str,
     ) -> Result<Option<String>> {
         let key = format!("clr-meta::{}::{}", contract_hash, key);
-        let params: [&dyn ToSql; 2] = [&bhh, &key];
+        let params = params![bhh, key];
         match conn
             .query_row(
                 "SELECT value FROM metadata_table WHERE blockhash = ? AND key = ?",
AND key = ?", - ¶ms, + params, |row| row.get(0), ) .optional() @@ -199,10 +265,10 @@ impl SqliteConnection { pub fn check_schema(conn: &Connection) -> Result<()> { let sql = "SELECT sql FROM sqlite_master WHERE name=?"; let _: String = conn - .query_row(sql, &["data_table"], |row| row.get(0)) + .query_row(sql, params!["data_table"], |row| row.get(0)) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; let _: String = conn - .query_row(sql, &["metadata_table"], |row| row.get(0)) + .query_row(sql, params!["metadata_table"], |row| row.get(0)) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; Ok(()) } @@ -217,3 +283,133 @@ impl SqliteConnection { Ok(conn) } } + +pub struct MemoryBackingStore { + side_store: Connection, +} + +impl Default for MemoryBackingStore { + fn default() -> Self { + MemoryBackingStore::new() + } +} + +impl MemoryBackingStore { + #[allow(clippy::unwrap_used)] + pub fn new() -> MemoryBackingStore { + let side_store = SqliteConnection::memory().unwrap(); + + let mut memory_marf = MemoryBackingStore { side_store }; + + memory_marf.as_clarity_db().initialize(); + + memory_marf + } + + pub fn as_clarity_db(&mut self) -> ClarityDatabase { + ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB) + } + + pub fn as_analysis_db(&mut self) -> AnalysisDatabase { + AnalysisDatabase::new(self) + } +} + +impl ClarityBackingStore for MemoryBackingStore { + fn set_block_hash(&mut self, bhh: StacksBlockId) -> Result { + Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) + } + + fn get_data(&mut self, key: &str) -> Result> { + SqliteConnection::get(self.get_side_store(), key) + } + + fn get_data_with_proof(&mut self, key: &str) -> Result)>> { + Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) + } + + fn get_side_store(&mut self) -> &Connection { + &self.side_store + } + + fn get_block_at_height(&mut self, height: u32) -> Option { + if height == 0 { + Some(StacksBlockId([255; 32])) + } else { + None + } + } + + fn get_open_chain_tip(&mut self) -> StacksBlockId { + StacksBlockId([255; 32]) + } + + fn get_open_chain_tip_height(&mut self) -> u32 { + 0 + } + + fn get_current_block_height(&mut self) -> u32 { + 1 + } + + fn get_cc_special_cases_handler(&self) -> Option { + None + } + + fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()> { + for (key, value) in items.into_iter() { + SqliteConnection::put(self.get_side_store(), &key, &value)?; + } + Ok(()) + } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> Result<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> Result> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> Result> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } +} + +impl ToSql for ExecutionCost { + fn to_sql(&self) -> rusqlite::Result { + let val = serde_json::to_string(self) + .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?; + Ok(ToSqlOutput::from(val)) + } +} + +impl FromSql for ExecutionCost { + fn column_result(value: ValueRef) -> FromSqlResult { 
+        let str_val = String::column_result(value)?;
+        let parsed = serde_json::from_str(&str_val)
+            .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?;
+        Ok(parsed)
+    }
+}
diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs
index 7426be7966f..138203db713 100644
--- a/clarity/src/vm/docs/contracts.rs
+++ b/clarity/src/vm/docs/contracts.rs
@@ -4,10 +4,13 @@ use hashbrown::{HashMap, HashSet};
 use stacks_common::consts::CHAIN_ID_TESTNET;
 use stacks_common::types::StacksEpochId;
 
-use crate::vm::analysis::{mem_type_check, ContractAnalysis};
+#[cfg(feature = "canonical")]
+use crate::vm::analysis::mem_type_check;
+use crate::vm::analysis::ContractAnalysis;
 use crate::vm::ast::{build_ast_with_rules, ASTRules};
 use crate::vm::contexts::GlobalContext;
 use crate::vm::costs::LimitedCostTracker;
+#[cfg(feature = "canonical")]
 use crate::vm::database::MemoryBackingStore;
 use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature};
 use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value};
@@ -60,6 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) -
     }
 }
 
+#[cfg(feature = "canonical")]
 #[allow(clippy::expect_used)]
 fn get_constant_value(var_name: &str, contract_content: &str) -> Value {
     let to_eval = format!("{}\n{}", contract_content, var_name);
@@ -68,6 +72,7 @@ fn get_constant_value(var_name: &str, contract_content: &str) -> Value {
         .expect("BUG: failed to return constant value")
 }
 
+#[cfg(feature = "canonical")]
 fn doc_execute(program: &str) -> Result<Option<Value>, vm::Error> {
     let contract_id = QualifiedContractIdentifier::transient();
     let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2);
@@ -94,11 +99,15 @@ fn doc_execute(program: &str) -> Result<Option<Value>, vm::Error> {
     })
 }
 
+#[cfg(feature = "canonical")]
 #[allow(clippy::expect_used)]
-pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractRef {
-    let (_, contract_analysis) =
-        mem_type_check(content, ClarityVersion::latest(), StacksEpochId::latest())
-            .expect("BUG: failed to type check boot contract");
+pub fn make_docs(
+    content: &str,
+    support_docs: &ContractSupportDocs,
+    version: ClarityVersion,
+) -> ContractRef {
+    let (_, contract_analysis) = mem_type_check(content, version, StacksEpochId::latest())
+        .expect("BUG: failed to type check boot contract");
 
     let ContractAnalysis {
         public_function_types,
@@ -176,15 +185,17 @@ pub fn make_docs(content: &str, support_docs: &ContractSupportDocs) -> ContractR
 
 /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs,
 /// and a map from `contract_name` to corresponding `ContractSupportDocs`
+#[cfg(feature = "canonical")]
 pub fn produce_docs_refs<A: AsRef<str>, B: AsRef<str>>(
     contracts: &[(A, B)],
     support_docs: &HashMap<&str, ContractSupportDocs>,
+    version: ClarityVersion,
 ) -> BTreeMap<String, ContractRef> {
     let mut docs = BTreeMap::new();
 
     for (contract_name, content) in contracts.iter() {
         if let Some(contract_support) = support_docs.get(contract_name.as_ref()) {
-            let contract_ref = make_docs(content.as_ref(), contract_support);
+            let contract_ref = make_docs(content.as_ref(), contract_support, version);
 
             docs.insert(contract_name.as_ref().to_string(), contract_ref);
         }
diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs
index e0b78403b93..6bf577b680a 100644
--- a/clarity/src/vm/docs/mod.rs
+++ b/clarity/src/vm/docs/mod.rs
@@ -41,7 +41,9 @@ pub struct KeywordAPI {
     pub description: &'static str,
     pub example: &'static str,
     /// The version where this keyword was first introduced.
-    pub version: ClarityVersion,
+    pub min_version: ClarityVersion,
+    /// The version where this keyword was disabled.
+    pub max_version: Option<ClarityVersion>,
 }
 
 #[derive(Serialize, Clone)]
@@ -63,7 +65,9 @@ pub struct FunctionAPI {
     pub description: String,
     pub example: String,
     /// The version where this keyword was first introduced.
-    pub version: ClarityVersion,
+    pub min_version: ClarityVersion,
+    /// The version where this function was disabled.
+    pub max_version: Option<ClarityVersion>,
 }
 
 pub struct SimpleFunctionAPI {
@@ -96,17 +100,19 @@ const BLOCK_HEIGHT: SimpleKeywordAPI = SimpleKeywordAPI {
     name: "block-height",
     snippet: "block-height",
     output_type: "uint",
-    description: "Returns the current block height of the Stacks blockchain as an uint",
+    description: "Returns the current block height of the Stacks blockchain in Clarity 1 and 2.
+Upon activation of epoch 3.0, `block-height` will return the same value as `tenure-height`.
+In Clarity 3, `block-height` is removed and replaced with `stacks-block-height`.",
     example:
-        "(> block-height 1000) ;; returns true if the current block-height has passed 1000 blocks.",
+        "(> block-height u1000) ;; returns true if the current block-height has passed 1000 blocks.",
 };
 
 const BURN_BLOCK_HEIGHT: SimpleKeywordAPI = SimpleKeywordAPI {
     name: "burn-block-height",
     snippet: "burn-block-height",
     output_type: "uint",
-    description: "Returns the current block height of the underlying burn blockchain as a uint",
-    example: "(> burn-block-height 1000) ;; returns true if the current height of the underlying burn blockchain has passed 1000 blocks.",
+    description: "Returns the current block height of the underlying burn blockchain.",
+    example: "(> burn-block-height u832000) ;; returns true if the current height of the underlying burn blockchain has passed 832,000 blocks.",
 };
 
 const CONTRACT_CALLER_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI {
@@ -120,6 +126,25 @@ to the same contract principal.",
     example: "(print contract-caller) ;; Will print out a Stacks address of the transaction sender",
 };
 
+const STACKS_BLOCK_HEIGHT_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI {
+    name: "stacks-block-height",
+    snippet: "stacks-block-height",
+    output_type: "uint",
+    description: "Returns the current block height of the Stacks blockchain.",
+    example:
+        "(<= stacks-block-height u500000) ;; returns true if the current block-height has not passed 500,000 blocks.",
+};
+
+const TENURE_HEIGHT_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI {
+    name: "tenure-height",
+    snippet: "tenure-height",
+    output_type: "uint",
+    description: "Returns the number of tenures that have passed.
+At the start of epoch 3.0, `tenure-height` will return the same value as `block-height`, then it will continue to increase as each tenure passes.",
+    example:
+        "(< tenure-height u140000) ;; returns true if the current tenure-height has not passed 140,000 tenures.",
+};
+
 const TX_SENDER_KEYWORD: SimpleKeywordAPI = SimpleKeywordAPI {
     name: "tx-sender",
     snippet: "tx-sender",
@@ -245,7 +270,7 @@ const BUFF_TO_UINT_LE_API: SimpleFunctionAPI = SimpleFunctionAPI {
     name: None,
     snippet: "buff-to-uint-le ${1:buff}",
     signature: "(buff-to-uint-le (buff 16))",
-    description: "Converts a byte buffer to an unsigned integer use a little-endian encoding..
+    description: "Converts a byte buffer to an unsigned integer using a little-endian encoding.
 The byte buffer can be up to 16 bytes in length.
 If there are fewer than 16 bytes, as this function uses a little-endian encoding, the input behaves as if it is zero-padded on the _right_.
@@ -857,7 +882,8 @@ fn make_for_simple_native(
         signature: api.signature.to_string(),
         description: api.description.to_string(),
         example: api.example.to_string(),
-        version: function.get_version(),
+        min_version: function.get_min_version(),
+        max_version: function.get_max_version(),
     }
 }
 
@@ -1701,40 +1727,44 @@ const GET_BLOCK_INFO_API: SpecialAPI = SpecialAPI {
     snippet: "get-block-info? ${1:prop} ${2:block-height}",
     output_type: "(optional buff) | (optional uint)",
     signature: "(get-block-info? prop-name block-height)",
-    description: "The `get-block-info?` function fetches data for a block of the given *Stacks* block height. The
+    description: "In Clarity 3, `get-block-info?` is removed. In its place, `get-stacks-block-info?` can be used to retrieve
+information about a Stacks block and `get-tenure-info?` can be used to get information pertaining to the tenure.
+
+The `get-block-info?` function fetches data for a block of the given *Stacks* block height. The
 value and type returned are determined by the specified `BlockInfoPropertyName`. If the provided `block-height` does
 not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names
 are as follows:
 
-`burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the
+- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the
 Stacks block at the given Stacks chain height.
 
-`id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived
+- `id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived
 from the block hash and the history of accepted PoX operations. This is also the block hash value you would pass into `(at-block)`.
 
-`header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING* this hash is
+- `header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING** this hash is
 not guaranteed to be globally unique, since the same Stacks block can be mined in different PoX forks. If you need global uniqueness, you should use `id-header-hash`.
 
-`miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to
+- `miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to
 be the same `principal` that received the block reward, since Stacks 2.1 supports coinbase transactions that pay the reward to a contract address. This is merely
 the address of the `principal` that produced the block.
 
-`time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds
-which roughly corresponds to when the block was mined. **Note**: this does not increase monotonically with each block
-and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information.
+- `time`: This property returns a `uint` value of the block header time field.
This is a Unix epoch timestamp in seconds +which roughly corresponds to when the block was mined. This timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information. +For blocks mined after epoch 3.0, all Stacks blocks in one tenure will share the same timestamp. To get the Stacks block time for a block in epoch 3.0+, use `get-stacks-block-info?`. -New in Stacks 2.1: +- `vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding block. -`block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. This value is only available once the reward for +- `block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. This value is only available once the reward for the block matures. That is, the latest `block-reward` value available is at least 101 Stacks blocks in the past (on mainnet). The reward includes the coinbase, the anchored block's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may be smaller than the Stacks coinbase at this height, because the miner may have been punished with a valid `PoisonMicroblock` transaction in the event that the miner -published two or more microblock stream forks. +published two or more microblock stream forks. Added in Clarity 2. -`miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this block. +- `miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this block. Added in Clarity 2. -`miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this Stacks block. Note that -this value is less than or equal to the value for `miner-spend-total` at the same block height. +- `miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this Stacks block. Note that +this value is less than or equal to the value for `miner-spend-total` at the same block height. Added in Clarity 2. ", example: "(get-block-info? time u0) ;; Returns (some u1557860301) (get-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) @@ -1779,6 +1809,74 @@ The `addrs` list contains the same PoX address values passed into the PoX smart " }; +const GET_STACKS_BLOCK_INFO_API: SpecialAPI = SpecialAPI { + input_type: "StacksBlockInfoPropertyName, uint", + snippet: "get-stacks-block-info? ${1:prop} ${2:block-height}", + output_type: "(optional buff) | (optional uint)", + signature: "(get-stacks-block-info? prop-name block-height)", + description: "The `get-stacks-block-info?` function fetches data for a block of the given *Stacks* block height. The +value and type returned are determined by the specified `StacksBlockInfoPropertyName`. If the provided `block-height` does +not correspond to an existing block prior to the current block, the function returns `none`. 
The currently available property names
+are as follows:
+
+- `id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived
+from the block hash and the history of accepted PoX operations. This is also the block hash value you would pass into `(at-block)`.
+
+- `header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING** this hash is
+not guaranteed to be globally unique, since the same Stacks block can be mined in different PoX forks. If you need global uniqueness, you should use `id-header-hash`.
+
+- `time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds
+which roughly corresponds to when the block was mined. For a block mined before epoch 3.0, this timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block
+and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information.
+For a block mined after epoch 3.0, this timestamp comes from the Stacks block header. **Note**: this is the time, according to the miner, when
+the mining of this block started, but is not guaranteed to be accurate. This time will be validated by the signers to be:
+  - Greater than the timestamp of the previous block
+  - Less than 15 seconds into the future (according to their own local clocks)
+",
+    example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301)
+(get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb)
+"
+};
+
+const GET_TENURE_INFO_API: SpecialAPI = SpecialAPI {
+    input_type: "TenureInfoPropertyName, uint",
+    snippet: "get-tenure-info? ${1:prop} ${2:block-height}",
+    output_type: "(optional buff) | (optional uint)",
+    signature: "(get-tenure-info? prop-name block-height)",
+    description: "The `get-tenure-info?` function fetches data for the tenure at the given block height. The
+value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `block-height` does
+not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names
+are as follows:
+
+- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the
+tenure at the given height.
+
+- `miner-address`: This property returns a `principal` value corresponding to the miner of the given tenure. **WARNING** This is not guaranteed to
+be the same `principal` that received the block reward, since Stacks 2.1+ supports coinbase transactions that pay the reward to a contract address. This is merely
+the address of the `principal` that produced the tenure.
+
+- `time`: This property returns a `uint` Unix epoch timestamp in seconds which roughly corresponds to when the tenure was started. This timestamp comes
+from the burnchain block. **Note**: this does not increase monotonically with each tenure and tenure times are accurate only to within two hours. See
+[BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information.
+
+- `vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding tenure.
+
+- `block-reward`: This property returns a `uint` value for the total block reward of the indicated tenure. This value is only available once the reward for
+the tenure matures. That is, the latest `block-reward` value available is at least 101 Stacks blocks in the past (on mainnet). The reward includes the coinbase,
+the anchored tenure's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may
+be smaller than the Stacks coinbase at this height, because the miner may have been punished with a valid `PoisonMicroblock` transaction in the event that the miner
+published two or more microblock stream forks.
+
+- `miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this tenure.
+
+- `miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this tenure. Note that
+this value is less than or equal to the value for `miner-spend-total` at the same tenure height.
+",
+    example: "(get-tenure-info? time u0) ;; Returns (some u1557860301)
+(get-tenure-info? vrf-seed u0) ;; Returns (some 0xf490de2920c8a35fabeb13208852aa28c76f9be9b03a4dd2b3c075f7a26923b4)
+"
+};
+
 const PRINCIPAL_CONSTRUCT_API: SpecialAPI = SpecialAPI {
     input_type: "(buff 1), (buff 20), [(string-ascii 40)]",
     output_type: "(response principal { error_code: uint, value: (optional principal) })",
@@ -2089,7 +2187,7 @@ const MINT_TOKEN: SpecialAPI = SpecialAPI {
 type defined using `define-fungible-token`. The increased token balance is _not_ transfered from another principal, but rather minted.
 
-If a non-positive amount is provided to mint, this function returns `(err 1)`. Otherwise, on successfuly mint, it
+If a non-positive amount is provided to mint, this function returns `(err 1)`. Otherwise, on a successful mint, it
 returns `(ok true)`.
 If this call would result in more supplied tokens than defined by the total supply in
 `define-fungible-token`, then a `SupplyOverflow` runtime error is thrown.
", @@ -2495,6 +2593,8 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { AsContract => make_for_special(&AS_CONTRACT_API, function), GetBlockInfo => make_for_special(&GET_BLOCK_INFO_API, function), GetBurnBlockInfo => make_for_special(&GET_BURN_BLOCK_INFO_API, function), + GetStacksBlockInfo => make_for_special(&GET_STACKS_BLOCK_INFO_API, function), + GetTenureInfo => make_for_special(&GET_TENURE_INFO_API, function), ConsOkay => make_for_special(&CONS_OK_API, function), ConsError => make_for_special(&CONS_ERR_API, function), ConsSome => make_for_special(&CONS_SOME_API, function), @@ -2538,13 +2638,15 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { } fn make_keyword_reference(variable: &NativeVariables) -> Option { - let simple_api = match variable { + let keyword = match variable { NativeVariables::TxSender => TX_SENDER_KEYWORD.clone(), NativeVariables::ContractCaller => CONTRACT_CALLER_KEYWORD.clone(), NativeVariables::NativeNone => NONE_KEYWORD.clone(), NativeVariables::NativeTrue => TRUE_KEYWORD.clone(), NativeVariables::NativeFalse => FALSE_KEYWORD.clone(), NativeVariables::BlockHeight => BLOCK_HEIGHT.clone(), + NativeVariables::StacksBlockHeight => STACKS_BLOCK_HEIGHT_KEYWORD.clone(), + NativeVariables::TenureHeight => TENURE_HEIGHT_KEYWORD.clone(), NativeVariables::BurnBlockHeight => BURN_BLOCK_HEIGHT.clone(), NativeVariables::TotalLiquidMicroSTX => TOTAL_LIQUID_USTX_KEYWORD.clone(), NativeVariables::Regtest => REGTEST_KEYWORD.clone(), @@ -2553,12 +2655,13 @@ fn make_keyword_reference(variable: &NativeVariables) -> Option { NativeVariables::TxSponsor => TX_SPONSOR_KEYWORD.clone(), }; Some(KeywordAPI { - name: simple_api.name, - snippet: simple_api.snippet, - output_type: simple_api.output_type, - description: simple_api.description, - example: simple_api.example, - version: variable.get_version(), + name: keyword.name, + snippet: keyword.snippet, + output_type: keyword.output_type, + description: keyword.description, + example: keyword.example, + min_version: variable.get_min_version(), + max_version: variable.get_max_version(), }) } @@ -2571,7 +2674,8 @@ fn make_for_special(api: &SpecialAPI, function: &NativeFunctions) -> FunctionAPI signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), - version: function.get_version(), + min_version: function.get_min_version(), + max_version: function.get_max_version(), } } @@ -2584,7 +2688,8 @@ fn make_for_define(api: &DefineAPI, name: String) -> FunctionAPI { signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), - version: ClarityVersion::Clarity1, + min_version: ClarityVersion::Clarity1, + max_version: None, } } @@ -2641,12 +2746,12 @@ pub fn make_json_api_reference() -> String { #[cfg(test)] mod test { use stacks_common::address::AddressHashMode; - use stacks_common::consts::CHAIN_ID_TESTNET; + use stacks_common::consts::{CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_1}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; - use stacks_common::types::{Address, StacksEpochId, PEER_VERSION_EPOCH_2_1}; + use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::hash::hex_bytes; use super::{get_input_type_string, make_all_api_reference, make_json_api_reference}; @@ -2684,10 +2789,18 @@ mod test { ) -> Option { None } - fn get_consensus_hash_for_block(&self, _bhh: 
+    fn get_consensus_hash_for_block(
+        &self,
+        _bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<ConsensusHash> {
         Some(ConsensusHash([0; 20]))
     }
-    fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option<VRFSeed> {
+    fn get_vrf_seed_for_block(
+        &self,
+        _bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<VRFSeed> {
         Some(
             VRFSeed::from_hex(
                 "f490de2920c8a35fabeb13208852aa28c76f9be9b03a4dd2b3c075f7a26923b4",
@@ -2698,6 +2811,7 @@ mod test {
     fn get_stacks_block_header_hash_for_block(
         &self,
         _id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
     ) -> Option<BlockHeaderHash> {
         Some(
             BlockHeaderHash::from_hex(
                 "374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb",
             )
             .unwrap(),
         )
     }
-    fn get_burn_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option<u64> {
+    fn get_burn_block_time_for_block(
+        &self,
+        _id_bhh: &StacksBlockId,
+        _epoch: Option<&StacksEpochId>,
+    ) -> Option<u64> {
         Some(1557860301)
     }
+    fn get_stacks_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option<u64> {
+        Some(1557860302)
+    }
     fn get_burn_block_height_for_block(&self, _id_bhh: &StacksBlockId) -> Option<u32> {
         Some(567890)
     }
-    fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option<StacksAddress> {
+    fn get_miner_address(
+        &self,
+        _id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<StacksAddress> {
         None
     }
-    fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_burnchain_tokens_spent_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         Some(12345)
     }
     fn get_burnchain_tokens_spent_for_winning_block(
         &self,
         id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
     ) -> Option<u128> {
         Some(2345)
     }
-    fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_tokens_earned_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         Some(12000)
     }
 }
@@ -2735,6 +2869,14 @@ mod test {
     const DOC_POX_STATE_DB: DocBurnStateDB = DocBurnStateDB {};
 
     impl BurnStateDB for DocBurnStateDB {
+        fn get_tip_burn_block_height(&self) -> Option<u32> {
+            Some(0x9abc)
+        }
+
+        fn get_tip_sortition_id(&self) -> Option<SortitionId> {
+            Some(SortitionId([0u8; 32]))
+        }
+
         fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option<u32> {
             Some(5678)
         }
diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs
index 55977ec6aa5..b3b0ca5feac 100644
--- a/clarity/src/vm/errors.rs
+++ b/clarity/src/vm/errors.rs
@@ -17,6 +17,7 @@
 use std::error::Error as ErrorTrait;
 use std::{error, fmt};
 
+#[cfg(feature = "canonical")]
 use rusqlite::Error as SqliteError;
 use serde_json::Error as SerdeJSONErr;
 use stacks_common::types::chainstate::BlockHeaderHash;
@@ -56,6 +57,7 @@ pub enum InterpreterError {
     UninitializedPersistedVariable,
     FailedToConstructAssetTable,
     FailedToConstructEventBatch,
+    #[cfg(feature = "canonical")]
     SqliteError(IncomparableError<SqliteError>),
     BadFileName,
     FailedToCreateDataDirectory,
diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs
index b047faf682b..f048a595367 100644
--- a/clarity/src/vm/functions/database.rs
+++ b/clarity/src/vm/functions/database.rs
@@ -32,9 +32,9 @@ use crate::vm::functions::tuples;
 use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType};
 use crate::vm::types::{
     BlockInfoProperty, BuffData, BurnBlockInfoProperty, OptionalData, PrincipalData, SequenceData,
-    TupleData, TypeSignature, Value, BUFF_32,
+    StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, BUFF_32,
 };
-use crate::vm::{eval, Environment, LocalContext};
+use crate::vm::{eval, ClarityVersion, Environment, LocalContext};
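
The HeadersDB changes above thread a `StacksEpochId` through every header lookup so the backing store can distinguish pre-Nakamoto headers from Nakamoto ones. A hedged, self-contained sketch of why that parameter helps, using stand-in types rather than the real chainstate structs:

use std::collections::HashMap;

// Stand-ins for StacksEpochId and the index-block-hash key type.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum EpochId {
    Epoch25,
    Epoch30,
}

// A toy headers store that keeps epoch-2.x and Nakamoto headers in
// separate maps, the way separate schemas/tables would.
struct SplitHeadersDb {
    epoch2_headers: HashMap<[u8; 32], u64>,
    nakamoto_headers: HashMap<[u8; 32], u64>,
}

impl SplitHeadersDb {
    // Same shape as the amended trait methods: the caller resolves the
    // block's epoch first, and the store routes the query accordingly.
    fn get_burn_block_time(&self, id: &[u8; 32], epoch: &EpochId) -> Option<u64> {
        match epoch {
            EpochId::Epoch25 => self.epoch2_headers.get(id).copied(),
            EpochId::Epoch30 => self.nakamoto_headers.get(id).copied(),
        }
    }
}

fn main() {
    let mut db = SplitHeadersDb {
        epoch2_headers: HashMap::new(),
        nakamoto_headers: HashMap::new(),
    };
    db.nakamoto_headers.insert([1u8; 32], 1_700_000_000);
    assert_eq!(
        db.get_burn_block_time(&[1u8; 32], &EpochId::Epoch30),
        Some(1_700_000_000)
    );
}
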
 switch_on_global_epoch!(special_fetch_variable(
     special_fetch_variable_v200,
@@ -717,12 +717,32 @@ pub fn special_delete_entry_v205(
     result.map(|data| data.value)
 }
 
+/// Handles the `get-block-info?` special function.
+/// Interprets `args` as variables `[property-name, block-height]`, and returns
+/// a property value determined by `property-name`:
+/// - `id-header-hash` returns the index block hash at `block-height`
+/// - `header-hash` returns the header hash at `block-height`
+/// - `time` returns the burn block time of the block at `block-height`
+/// - `vrf-seed` returns the VRF seed of the block at `block-height`
+/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to `block-height`
+/// - `miner-address` returns the address of the principal that mined the block at `block-height`
+/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the block at `block-height`
+/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the block at `block-height`
+/// - `block-reward` returns the block reward for the block at `block-height`
+///
+/// # Errors:
+/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments.
+/// - CheckErrors::GetBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName.
+/// - CheckErrors::GetBlockInfoExpectPropertyName if `args[0]` isn't a
+///   BlockInfoProperty available in the contract's Clarity version.
+/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`.
 pub fn special_get_block_info(
     args: &[SymbolicExpression],
     env: &mut Environment,
     context: &LocalContext,
 ) -> Result<Value> {
-    // (get-block-info? property-name block-height-int)
+    // (get-block-info? property-name block-height-uint)
     runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?;
 
     check_argument_count(2, args)?;
@@ -732,11 +752,10 @@ pub fn special_get_block_info(
         .match_atom()
         .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?;
 
-    let block_info_prop = BlockInfoProperty::lookup_by_name_at_version(
-        property_name,
-        env.contract_context.get_clarity_version(),
-    )
-    .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?;
+    let version = env.contract_context.get_clarity_version();
+
+    let block_info_prop = BlockInfoProperty::lookup_by_name_at_version(property_name, version)
+        .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?;
 
     // Handle the block-height input arg clause.
     let height_eval = eval(&args[1], env, context)?;
@@ -757,7 +776,10 @@ pub fn special_get_block_info(
 
     let result = match block_info_prop {
         BlockInfoProperty::Time => {
-            let block_time = env.global_context.database.get_block_time(height_value)?;
+            let block_time = env
+                .global_context
+                .database
+                .get_burn_block_time(height_value, None)?;
             Value::UInt(u128::from(block_time))
         }
         BlockInfoProperty::VrfSeed => {
@@ -830,6 +852,7 @@ pub fn special_get_block_info(
     Value::some(result)
 }
 
+/// Handles the `get-burn-block-info?` special function.
 /// Interprets `args` as variables `[property_name, burn_block_height]`, and returns
 /// a property value determined by `property_name`:
 /// - `header_hash` returns the burn block header hash at `burn_block_height`
@@ -924,3 +947,188 @@ pub fn special_get_burn_block_info(
         }
     }
 }
+
+/// Handles the `get-stacks-block-info?` special function.
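+/// Introduced in Clarity 3, together with `get-tenure-info?`, as the
+/// replacement for the block-scoped properties of `get-block-info?`.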
+/// Interprets `args` as variables `[property-name, block-height]`, and returns
+/// a property value determined by `property-name`:
+/// - `id-header-hash` returns the index block hash at `block-height`
+/// - `header-hash` returns the header hash at `block-height`
+/// - `time` returns the block time at `block-height`
+///
+/// # Errors:
+/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments.
+/// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName.
+/// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty.
+/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`.
+pub fn special_get_stacks_block_info(
+    args: &[SymbolicExpression],
+    env: &mut Environment,
+    context: &LocalContext,
+) -> Result<Value> {
+    // (get-stacks-block-info? property-name block-height-uint)
+    runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?;
+
+    check_argument_count(2, args)?;
+
+    // Handle the block property name input arg.
+    let property_name = args[0]
+        .match_atom()
+        .ok_or(CheckErrors::GetStacksBlockInfoExpectPropertyName)?;
+
+    let block_info_prop = StacksBlockInfoProperty::lookup_by_name(property_name).ok_or(
+        CheckErrors::NoSuchStacksBlockInfoProperty(property_name.to_string()),
+    )?;
+
+    // Handle the block-height input arg.
+    let height_eval = eval(&args[1], env, context)?;
+    let height_value = match height_eval {
+        Value::UInt(result) => Ok(result),
+        x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)),
+    }?;
+
+    let Ok(height_value) = u32::try_from(height_value) else {
+        return Ok(Value::none());
+    };
+
+    let current_block_height = env.global_context.database.get_current_block_height();
+    if height_value >= current_block_height {
+        return Ok(Value::none());
+    }
+
+    let result = match block_info_prop {
+        StacksBlockInfoProperty::Time => {
+            let block_time = env.global_context.database.get_block_time(height_value)?;
+            Value::UInt(u128::from(block_time))
+        }
+        StacksBlockInfoProperty::HeaderHash => {
+            let header_hash = env
+                .global_context
+                .database
+                .get_block_header_hash(height_value)?;
+            Value::Sequence(SequenceData::Buffer(BuffData {
+                data: header_hash.as_bytes().to_vec(),
+            }))
+        }
+        StacksBlockInfoProperty::IndexHeaderHash => {
+            let id_header_hash = env
+                .global_context
+                .database
+                .get_index_block_header_hash(height_value)?;
+            Value::Sequence(SequenceData::Buffer(BuffData {
+                data: id_header_hash.as_bytes().to_vec(),
+            }))
+        }
+    };
+
+    Value::some(result)
+}
+
+/// Handles the `get-tenure-info?` special function.
+/// Interprets `args` as variables `[property-name, block-height]`, and returns
+/// a property value determined by `property-name`:
+/// - `time` returns the burn block time for the tenure of which `block-height` is a part
+/// - `vrf-seed` returns the VRF seed for the tenure of which `block-height` is a part
+/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to the tenure of which `block-height` is a part
+/// - `miner-address` returns the address of the principal that mined the tenure of which `block-height` is a part
+/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the tenure of which `block-height` is a part
+/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the tenure of which `block-height` is a part
+/// - `block-reward` returns the block reward for the tenure of which `block-height` is a part
+///
+/// # Errors:
+/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments.
+/// - CheckErrors::GetTenureInfoExpectPropertyName if `args[0]` isn't a ClarityName.
+/// - CheckErrors::GetTenureInfoExpectPropertyName if `args[0]` isn't a TenureInfoProperty.
+/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`.
+pub fn special_get_tenure_info(
+    args: &[SymbolicExpression],
+    env: &mut Environment,
+    context: &LocalContext,
+) -> Result<Value> {
+    // (get-tenure-info? property-name block-height-uint)
+    runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?;
+
+    check_argument_count(2, args)?;
+
+    // Handle the block property name input arg.
+    let property_name = args[0]
+        .match_atom()
+        .ok_or(CheckErrors::GetTenureInfoExpectPropertyName)?;
+
+    let block_info_prop = TenureInfoProperty::lookup_by_name(property_name)
+        .ok_or(CheckErrors::GetTenureInfoExpectPropertyName)?;
+
+    // Handle the block-height input arg.
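+    // Heights that overflow u32 or that are at/after the current block cannot
+    // name an existing tenure, so the checks below return `none` for them.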
+ let height_eval = eval(&args[1], env, context)?; + let height_value = match height_eval { + Value::UInt(result) => Ok(result), + x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)), + }?; + + let Ok(height_value) = u32::try_from(height_value) else { + return Ok(Value::none()); + }; + + let current_height = env.global_context.database.get_current_block_height(); + if height_value >= current_height { + return Ok(Value::none()); + } + + let result = match block_info_prop { + TenureInfoProperty::Time => { + let block_time = env + .global_context + .database + .get_burn_block_time(height_value, None)?; + Value::UInt(u128::from(block_time)) + } + TenureInfoProperty::VrfSeed => { + let vrf_seed = env + .global_context + .database + .get_block_vrf_seed(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: vrf_seed.as_bytes().to_vec(), + })) + } + TenureInfoProperty::BurnchainHeaderHash => { + let burnchain_header_hash = env + .global_context + .database + .get_burnchain_block_header_hash(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: burnchain_header_hash.as_bytes().to_vec(), + })) + } + TenureInfoProperty::MinerAddress => { + let miner_address = env + .global_context + .database + .get_miner_address(height_value)?; + Value::from(miner_address) + } + TenureInfoProperty::MinerSpendWinner => { + let winner_spend = env + .global_context + .database + .get_miner_spend_winner(height_value)?; + Value::UInt(winner_spend) + } + TenureInfoProperty::MinerSpendTotal => { + let total_spend = env + .global_context + .database + .get_miner_spend_total(height_value)?; + Value::UInt(total_spend) + } + TenureInfoProperty::BlockReward => { + // this is already an optional + let block_reward_opt = env.global_context.database.get_block_reward(height_value)?; + return Ok(match block_reward_opt { + Some(x) => Value::some(Value::UInt(x))?, + None => Value::none(), + }); + } + }; + + Value::some(result) +} diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 7c3647c2f63..833ed4baf85 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -84,132 +84,119 @@ pub mod principals; mod sequences; pub mod tuples; -define_versioned_named_enum!(NativeFunctions(ClarityVersion) { - Add("+", ClarityVersion::Clarity1), - Subtract("-", ClarityVersion::Clarity1), - Multiply("*", ClarityVersion::Clarity1), - Divide("/", ClarityVersion::Clarity1), - CmpGeq(">=", ClarityVersion::Clarity1), - CmpLeq("<=", ClarityVersion::Clarity1), - CmpLess("<", ClarityVersion::Clarity1), - CmpGreater(">", ClarityVersion::Clarity1), - ToInt("to-int", ClarityVersion::Clarity1), - ToUInt("to-uint", ClarityVersion::Clarity1), - Modulo("mod", ClarityVersion::Clarity1), - Power("pow", ClarityVersion::Clarity1), - Sqrti("sqrti", ClarityVersion::Clarity1), - Log2("log2", ClarityVersion::Clarity1), - BitwiseXor("xor", ClarityVersion::Clarity1), - And("and", ClarityVersion::Clarity1), - Or("or", ClarityVersion::Clarity1), - Not("not", ClarityVersion::Clarity1), - Equals("is-eq", ClarityVersion::Clarity1), - If("if", ClarityVersion::Clarity1), - Let("let", ClarityVersion::Clarity1), - Map("map", ClarityVersion::Clarity1), - Fold("fold", ClarityVersion::Clarity1), - Append("append", ClarityVersion::Clarity1), - Concat("concat", ClarityVersion::Clarity1), - AsMaxLen("as-max-len?", ClarityVersion::Clarity1), - Len("len", ClarityVersion::Clarity1), - ElementAt("element-at", ClarityVersion::Clarity1), - ElementAtAlias("element-at?", 
ClarityVersion::Clarity2), - IndexOf("index-of", ClarityVersion::Clarity1), - IndexOfAlias("index-of?", ClarityVersion::Clarity2), - BuffToIntLe("buff-to-int-le", ClarityVersion::Clarity2), - BuffToUIntLe("buff-to-uint-le", ClarityVersion::Clarity2), - BuffToIntBe("buff-to-int-be", ClarityVersion::Clarity2), - BuffToUIntBe("buff-to-uint-be", ClarityVersion::Clarity2), - IsStandard("is-standard", ClarityVersion::Clarity2), - PrincipalDestruct("principal-destruct?", ClarityVersion::Clarity2), - PrincipalConstruct("principal-construct?", ClarityVersion::Clarity2), - StringToInt("string-to-int?", ClarityVersion::Clarity2), - StringToUInt("string-to-uint?", ClarityVersion::Clarity2), - IntToAscii("int-to-ascii", ClarityVersion::Clarity2), - IntToUtf8("int-to-utf8", ClarityVersion::Clarity2), - ListCons("list", ClarityVersion::Clarity1), - FetchVar("var-get", ClarityVersion::Clarity1), - SetVar("var-set", ClarityVersion::Clarity1), - FetchEntry("map-get?", ClarityVersion::Clarity1), - SetEntry("map-set", ClarityVersion::Clarity1), - InsertEntry("map-insert", ClarityVersion::Clarity1), - DeleteEntry("map-delete", ClarityVersion::Clarity1), - TupleCons("tuple", ClarityVersion::Clarity1), - TupleGet("get", ClarityVersion::Clarity1), - TupleMerge("merge", ClarityVersion::Clarity1), - Begin("begin", ClarityVersion::Clarity1), - Hash160("hash160", ClarityVersion::Clarity1), - Sha256("sha256", ClarityVersion::Clarity1), - Sha512("sha512", ClarityVersion::Clarity1), - Sha512Trunc256("sha512/256", ClarityVersion::Clarity1), - Keccak256("keccak256", ClarityVersion::Clarity1), - Secp256k1Recover("secp256k1-recover?", ClarityVersion::Clarity1), - Secp256k1Verify("secp256k1-verify", ClarityVersion::Clarity1), - Print("print", ClarityVersion::Clarity1), - ContractCall("contract-call?", ClarityVersion::Clarity1), - AsContract("as-contract", ClarityVersion::Clarity1), - ContractOf("contract-of", ClarityVersion::Clarity1), - PrincipalOf("principal-of?", ClarityVersion::Clarity1), - AtBlock("at-block", ClarityVersion::Clarity1), - GetBlockInfo("get-block-info?", ClarityVersion::Clarity1), - GetBurnBlockInfo("get-burn-block-info?", ClarityVersion::Clarity2), - ConsError("err", ClarityVersion::Clarity1), - ConsOkay("ok", ClarityVersion::Clarity1), - ConsSome("some", ClarityVersion::Clarity1), - DefaultTo("default-to", ClarityVersion::Clarity1), - Asserts("asserts!", ClarityVersion::Clarity1), - UnwrapRet("unwrap!", ClarityVersion::Clarity1), - UnwrapErrRet("unwrap-err!", ClarityVersion::Clarity1), - Unwrap("unwrap-panic", ClarityVersion::Clarity1), - UnwrapErr("unwrap-err-panic", ClarityVersion::Clarity1), - Match("match", ClarityVersion::Clarity1), - TryRet("try!", ClarityVersion::Clarity1), - IsOkay("is-ok", ClarityVersion::Clarity1), - IsNone("is-none", ClarityVersion::Clarity1), - IsErr("is-err", ClarityVersion::Clarity1), - IsSome("is-some", ClarityVersion::Clarity1), - Filter("filter", ClarityVersion::Clarity1), - GetTokenBalance("ft-get-balance", ClarityVersion::Clarity1), - GetAssetOwner("nft-get-owner?", ClarityVersion::Clarity1), - TransferToken("ft-transfer?", ClarityVersion::Clarity1), - TransferAsset("nft-transfer?", ClarityVersion::Clarity1), - MintAsset("nft-mint?", ClarityVersion::Clarity1), - MintToken("ft-mint?", ClarityVersion::Clarity1), - GetTokenSupply("ft-get-supply", ClarityVersion::Clarity1), - BurnToken("ft-burn?", ClarityVersion::Clarity1), - BurnAsset("nft-burn?", ClarityVersion::Clarity1), - GetStxBalance("stx-get-balance", ClarityVersion::Clarity1), - StxTransfer("stx-transfer?", 
ClarityVersion::Clarity1), - StxTransferMemo("stx-transfer-memo?", ClarityVersion::Clarity2), - StxBurn("stx-burn?", ClarityVersion::Clarity1), - StxGetAccount("stx-account", ClarityVersion::Clarity2), - BitwiseAnd("bit-and", ClarityVersion::Clarity2), - BitwiseOr("bit-or", ClarityVersion::Clarity2), - BitwiseNot("bit-not", ClarityVersion::Clarity2), - BitwiseLShift("bit-shift-left", ClarityVersion::Clarity2), - BitwiseRShift("bit-shift-right", ClarityVersion::Clarity2), - BitwiseXor2("bit-xor", ClarityVersion::Clarity2), - Slice("slice?", ClarityVersion::Clarity2), - ToConsensusBuff("to-consensus-buff?", ClarityVersion::Clarity2), - FromConsensusBuff("from-consensus-buff?", ClarityVersion::Clarity2), - ReplaceAt("replace-at?", ClarityVersion::Clarity2), +define_versioned_named_enum_with_max!(NativeFunctions(ClarityVersion) { + Add("+", ClarityVersion::Clarity1, None), + Subtract("-", ClarityVersion::Clarity1, None), + Multiply("*", ClarityVersion::Clarity1, None), + Divide("/", ClarityVersion::Clarity1, None), + CmpGeq(">=", ClarityVersion::Clarity1, None), + CmpLeq("<=", ClarityVersion::Clarity1, None), + CmpLess("<", ClarityVersion::Clarity1, None), + CmpGreater(">", ClarityVersion::Clarity1, None), + ToInt("to-int", ClarityVersion::Clarity1, None), + ToUInt("to-uint", ClarityVersion::Clarity1, None), + Modulo("mod", ClarityVersion::Clarity1, None), + Power("pow", ClarityVersion::Clarity1, None), + Sqrti("sqrti", ClarityVersion::Clarity1, None), + Log2("log2", ClarityVersion::Clarity1, None), + BitwiseXor("xor", ClarityVersion::Clarity1, None), + And("and", ClarityVersion::Clarity1, None), + Or("or", ClarityVersion::Clarity1, None), + Not("not", ClarityVersion::Clarity1, None), + Equals("is-eq", ClarityVersion::Clarity1, None), + If("if", ClarityVersion::Clarity1, None), + Let("let", ClarityVersion::Clarity1, None), + Map("map", ClarityVersion::Clarity1, None), + Fold("fold", ClarityVersion::Clarity1, None), + Append("append", ClarityVersion::Clarity1, None), + Concat("concat", ClarityVersion::Clarity1, None), + AsMaxLen("as-max-len?", ClarityVersion::Clarity1, None), + Len("len", ClarityVersion::Clarity1, None), + ElementAt("element-at", ClarityVersion::Clarity1, None), + ElementAtAlias("element-at?", ClarityVersion::Clarity2, None), + IndexOf("index-of", ClarityVersion::Clarity1, None), + IndexOfAlias("index-of?", ClarityVersion::Clarity2, None), + BuffToIntLe("buff-to-int-le", ClarityVersion::Clarity2, None), + BuffToUIntLe("buff-to-uint-le", ClarityVersion::Clarity2, None), + BuffToIntBe("buff-to-int-be", ClarityVersion::Clarity2, None), + BuffToUIntBe("buff-to-uint-be", ClarityVersion::Clarity2, None), + IsStandard("is-standard", ClarityVersion::Clarity2, None), + PrincipalDestruct("principal-destruct?", ClarityVersion::Clarity2, None), + PrincipalConstruct("principal-construct?", ClarityVersion::Clarity2, None), + StringToInt("string-to-int?", ClarityVersion::Clarity2, None), + StringToUInt("string-to-uint?", ClarityVersion::Clarity2, None), + IntToAscii("int-to-ascii", ClarityVersion::Clarity2, None), + IntToUtf8("int-to-utf8", ClarityVersion::Clarity2, None), + ListCons("list", ClarityVersion::Clarity1, None), + FetchVar("var-get", ClarityVersion::Clarity1, None), + SetVar("var-set", ClarityVersion::Clarity1, None), + FetchEntry("map-get?", ClarityVersion::Clarity1, None), + SetEntry("map-set", ClarityVersion::Clarity1, None), + InsertEntry("map-insert", ClarityVersion::Clarity1, None), + DeleteEntry("map-delete", ClarityVersion::Clarity1, None), + TupleCons("tuple", 
ClarityVersion::Clarity1, None), + TupleGet("get", ClarityVersion::Clarity1, None), + TupleMerge("merge", ClarityVersion::Clarity1, None), + Begin("begin", ClarityVersion::Clarity1, None), + Hash160("hash160", ClarityVersion::Clarity1, None), + Sha256("sha256", ClarityVersion::Clarity1, None), + Sha512("sha512", ClarityVersion::Clarity1, None), + Sha512Trunc256("sha512/256", ClarityVersion::Clarity1, None), + Keccak256("keccak256", ClarityVersion::Clarity1, None), + Secp256k1Recover("secp256k1-recover?", ClarityVersion::Clarity1, None), + Secp256k1Verify("secp256k1-verify", ClarityVersion::Clarity1, None), + Print("print", ClarityVersion::Clarity1, None), + ContractCall("contract-call?", ClarityVersion::Clarity1, None), + AsContract("as-contract", ClarityVersion::Clarity1, None), + ContractOf("contract-of", ClarityVersion::Clarity1, None), + PrincipalOf("principal-of?", ClarityVersion::Clarity1, None), + AtBlock("at-block", ClarityVersion::Clarity1, None), + GetBlockInfo("get-block-info?", ClarityVersion::Clarity1, Some(ClarityVersion::Clarity2)), + GetBurnBlockInfo("get-burn-block-info?", ClarityVersion::Clarity2, None), + ConsError("err", ClarityVersion::Clarity1, None), + ConsOkay("ok", ClarityVersion::Clarity1, None), + ConsSome("some", ClarityVersion::Clarity1, None), + DefaultTo("default-to", ClarityVersion::Clarity1, None), + Asserts("asserts!", ClarityVersion::Clarity1, None), + UnwrapRet("unwrap!", ClarityVersion::Clarity1, None), + UnwrapErrRet("unwrap-err!", ClarityVersion::Clarity1, None), + Unwrap("unwrap-panic", ClarityVersion::Clarity1, None), + UnwrapErr("unwrap-err-panic", ClarityVersion::Clarity1, None), + Match("match", ClarityVersion::Clarity1, None), + TryRet("try!", ClarityVersion::Clarity1, None), + IsOkay("is-ok", ClarityVersion::Clarity1, None), + IsNone("is-none", ClarityVersion::Clarity1, None), + IsErr("is-err", ClarityVersion::Clarity1, None), + IsSome("is-some", ClarityVersion::Clarity1, None), + Filter("filter", ClarityVersion::Clarity1, None), + GetTokenBalance("ft-get-balance", ClarityVersion::Clarity1, None), + GetAssetOwner("nft-get-owner?", ClarityVersion::Clarity1, None), + TransferToken("ft-transfer?", ClarityVersion::Clarity1, None), + TransferAsset("nft-transfer?", ClarityVersion::Clarity1, None), + MintAsset("nft-mint?", ClarityVersion::Clarity1, None), + MintToken("ft-mint?", ClarityVersion::Clarity1, None), + GetTokenSupply("ft-get-supply", ClarityVersion::Clarity1, None), + BurnToken("ft-burn?", ClarityVersion::Clarity1, None), + BurnAsset("nft-burn?", ClarityVersion::Clarity1, None), + GetStxBalance("stx-get-balance", ClarityVersion::Clarity1, None), + StxTransfer("stx-transfer?", ClarityVersion::Clarity1, None), + StxTransferMemo("stx-transfer-memo?", ClarityVersion::Clarity2, None), + StxBurn("stx-burn?", ClarityVersion::Clarity1, None), + StxGetAccount("stx-account", ClarityVersion::Clarity2, None), + BitwiseAnd("bit-and", ClarityVersion::Clarity2, None), + BitwiseOr("bit-or", ClarityVersion::Clarity2, None), + BitwiseNot("bit-not", ClarityVersion::Clarity2, None), + BitwiseLShift("bit-shift-left", ClarityVersion::Clarity2, None), + BitwiseRShift("bit-shift-right", ClarityVersion::Clarity2, None), + BitwiseXor2("bit-xor", ClarityVersion::Clarity2, None), + Slice("slice?", ClarityVersion::Clarity2, None), + ToConsensusBuff("to-consensus-buff?", ClarityVersion::Clarity2, None), + FromConsensusBuff("from-consensus-buff?", ClarityVersion::Clarity2, None), + ReplaceAt("replace-at?", ClarityVersion::Clarity2, None), + 
GetStacksBlockInfo("get-stacks-block-info?", ClarityVersion::Clarity3, None), + GetTenureInfo("get-tenure-info?", ClarityVersion::Clarity3, None), }); -impl NativeFunctions { - pub fn lookup_by_name_at_version( - name: &str, - version: &ClarityVersion, - ) -> Option { - NativeFunctions::lookup_by_name(name).and_then(|native_function| { - if &native_function.get_version() <= version { - Some(native_function) - } else { - None - } - }) - } -} - /// /// Returns a callable for the given native function if it exists in the provided /// ClarityVersion @@ -436,6 +423,14 @@ pub fn lookup_reserved_functions(name: &str, version: &ClarityVersion) -> Option "special_get_burn_block_info", &database::special_get_burn_block_info, ), + GetStacksBlockInfo => SpecialFunction( + "special_get_stacks_block_info", + &database::special_get_stacks_block_info, + ), + GetTenureInfo => SpecialFunction( + "special_get_tenure_info", + &database::special_get_tenure_info, + ), ConsSome => NativeFunction( "native_some", NativeHandle::SingleArg(&options::native_some), diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index bca5223828f..d64b2075225 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -41,6 +41,9 @@ pub mod coverage; pub mod events; +#[cfg(feature = "canonical")] +pub mod tooling; + #[cfg(any(test, feature = "testing"))] pub mod tests; diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index b7e58919aa8..2df79766a24 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -2,12 +2,13 @@ use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLE use stacks_common::consts::{ BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, + PEER_VERSION_EPOCH_2_0, }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, VRFSeed, }; -use stacks_common::types::{StacksEpochId, PEER_VERSION_EPOCH_2_0}; +use stacks_common::types::StacksEpochId; use crate::vm::ast::ASTRules; use crate::vm::costs::ExecutionCost; @@ -136,12 +137,17 @@ impl HeadersDB for UnitTestHeaderDB { None } } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -150,7 +156,11 @@ impl HeadersDB for UnitTestHeaderDB { None } } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) @@ -161,6 +171,9 @@ impl HeadersDB for UnitTestHeaderDB { Some(1 + 10 * (id_bhh.as_bytes()[0] as u64)) } } + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + Some(1713799973 + 10 * (id_bhh.as_bytes()[0] as u64)) + } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -169,11 +182,19 @@ impl 
HeadersDB for UnitTestHeaderDB {
             Some(1 + id_bhh.as_bytes()[0] as u32)
         }
     }
-    fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option<StacksAddress> {
+    fn get_miner_address(
+        &self,
+        _id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<StacksAddress> {
         None
     }
-    fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option<ConsensusHash> {
+    fn get_consensus_hash_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<ConsensusHash> {
         if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH)
         {
             Some(FIRST_BURNCHAIN_CONSENSUS_HASH)
@@ -182,23 +203,43 @@ impl HeadersDB for UnitTestHeaderDB {
         }
     }
 
-    fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_burnchain_tokens_spent_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         // if the block is defined at all, then return a constant
         self.get_burn_block_height_for_block(id_bhh).map(|_| 2000)
     }
 
-    fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_burnchain_tokens_spent_for_winning_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         // if the block is defined at all, then return a constant
         self.get_burn_block_height_for_block(id_bhh).map(|_| 1000)
     }
 
-    fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_tokens_earned_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         // if the block is defined at all, then return a constant
         self.get_burn_block_height_for_block(id_bhh).map(|_| 3000)
     }
 }
 
 impl BurnStateDB for UnitTestBurnStateDB {
+    fn get_tip_burn_block_height(&self) -> Option<u32> {
+        None
+    }
+
+    fn get_tip_sortition_id(&self) -> Option<SortitionId> {
+        None
+    }
+
     fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option<u32> {
         None
     }
diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs
index 817a74917b5..3c4dc14b2e0 100644
--- a/clarity/src/vm/tests/contracts.rs
+++ b/clarity/src/vm/tests/contracts.rs
@@ -129,8 +129,9 @@ fn test_get_block_info_eval(
         let contract_identifier =
             QualifiedContractIdentifier::local(&format!("test-contract-{}", i)).unwrap();
         owned_env
-            .initialize_contract(
+            .initialize_versioned_contract(
                 contract_identifier.clone(),
+                ClarityVersion::Clarity2,
                 contracts[i],
                 None,
                 ASTRules::PrecheckSize,
@@ -1147,3 +1148,38 @@ fn test_cc_trait_stack_depth(
         RuntimeErrorType::MaxStackDepthReached.into()
     );
 }
+
+#[apply(test_epochs)]
+fn test_eval_with_non_existing_contract(
+    epoch: StacksEpochId,
+    mut env_factory: MemoryEnvironmentGenerator,
+) {
+    let mut owned_env = env_factory.get_env(epoch);
+
+    let mut placeholder_context = ContractContext::new(
+        QualifiedContractIdentifier::transient(),
+        ClarityVersion::Clarity2,
+    );
+
+    let mut env = owned_env.get_exec_environment(
+        Some(get_principal().expect_principal().unwrap()),
+        None,
+        &mut placeholder_context,
+    );
+
+    let result = env.eval_read_only(
+        &QualifiedContractIdentifier::local("absent").unwrap(),
+        "(ok 0)",
+    );
+    assert_eq!(
+        result.as_ref().unwrap_err(),
+        &Error::Unchecked(CheckErrors::NoSuchContract(
+            QualifiedContractIdentifier::local("absent")
+                .unwrap()
+                .to_string()
+        ))
+    );
+    drop(env);
+    owned_env.commit().unwrap();
+    assert!(owned_env.destruct().is_some());
+}
diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs
index 9f98e7e9300..2c6f23ef428 100644
--- a/clarity/src/vm/tests/mod.rs
+++ b/clarity/src/vm/tests/mod.rs
@@ -33,6 +33,19 @@ mod sequences;
 #[cfg(test)]
mod simple_apply_eval; mod traits; +mod variables; + +#[cfg(any(test, feature = "testing"))] +impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { + pub fn set_tenure_height(&mut self, tenure_height: u32) { + self.context.database.begin(); + self.context + .database + .set_tenure_height(tenure_height) + .unwrap(); + self.context.database.commit().unwrap(); + } +} macro_rules! epochs_template { ($($epoch:ident,)*) => { @@ -50,7 +63,6 @@ macro_rules! epochs_template { match epoch { // don't test Epoch-1.0 StacksEpochId::Epoch10 => (), - StacksEpochId::Epoch30 => (), // this will lead to a compile time failure if an epoch is left out // of the epochs_template! macro list $(StacksEpochId::$epoch)|* => (), @@ -76,10 +88,16 @@ macro_rules! clarity_template { match (epoch, clarity) { // don't test Epoch-1.0 (StacksEpochId::Epoch10, _) => (), - (StacksEpochId::Epoch30, _) => (), // don't test these pairs, because they aren't supported: (StacksEpochId::Epoch20, ClarityVersion::Clarity2) => (), (StacksEpochId::Epoch2_05, ClarityVersion::Clarity2) => (), + (StacksEpochId::Epoch20, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch2_05, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch21, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch22, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch23, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch24, ClarityVersion::Clarity3) => (), + (StacksEpochId::Epoch25, ClarityVersion::Clarity3) => (), // this will lead to a compile time failure if a pair is left out // of the clarity_template! macro list $((StacksEpochId::$epoch, ClarityVersion::$clarity))|* => (), @@ -103,6 +121,7 @@ epochs_template! { Epoch23, Epoch24, Epoch25, + Epoch30, } clarity_template! { @@ -118,6 +137,9 @@ clarity_template! { (Epoch24, Clarity2), (Epoch25, Clarity1), (Epoch25, Clarity2), + (Epoch30, Clarity1), + (Epoch30, Clarity2), + (Epoch30, Clarity3), } #[cfg(test)] @@ -140,7 +162,16 @@ pub fn tl_env_factory() -> TopLevelMemoryEnvironmentGenerator { pub struct MemoryEnvironmentGenerator(MemoryBackingStore); impl MemoryEnvironmentGenerator { fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { - let mut owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + let mut db = self.0.as_clarity_db(); + db.begin(); + db.set_clarity_epoch_version(epoch).unwrap(); + db.commit().unwrap(); + if epoch.clarity_uses_tip_burn_block() { + db.begin(); + db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); + } + let mut owned_env = OwnedEnvironment::new(db, epoch); // start an initial transaction. owned_env.begin(); owned_env @@ -150,8 +181,16 @@ impl MemoryEnvironmentGenerator { pub struct TopLevelMemoryEnvironmentGenerator(MemoryBackingStore); impl TopLevelMemoryEnvironmentGenerator { pub fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { - let owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); - owned_env + let mut db = self.0.as_clarity_db(); + db.begin(); + db.set_clarity_epoch_version(epoch).unwrap(); + db.commit().unwrap(); + if epoch.clarity_uses_tip_burn_block() { + db.begin(); + db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); + } + OwnedEnvironment::new(db, epoch) } } diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 78fcf176594..44f3447bad7 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -657,7 +657,7 @@ fn test_principal_destruct_bad_version_byte() { // Standard case where construction should work. 
We compare the output of the
 // Clarity function to hand-built principals.
 fn test_principal_construct_good() {
-    // We always use the the same bytes buffer.
+    // We always use the same bytes buffer.
     let mut transfer_buffer = [0u8; 20];
     transfer_buffer
         .copy_from_slice(&hex_bytes("fa6bf38ed557fe417333710d6033e9419391a320").unwrap());
diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs
new file mode 100644
index 00000000000..5b392bb6782
--- /dev/null
+++ b/clarity/src/vm/tests/variables.rs
@@ -0,0 +1,1079 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#[cfg(any(test, feature = "testing"))]
+use rstest::rstest;
+use stacks_common::types::StacksEpochId;
+
+#[cfg(test)]
+use crate::vm::analysis::type_checker::v2_1::tests::contracts::type_check_version;
+use crate::vm::analysis::{run_analysis, CheckError};
+use crate::vm::ast::{parse, ASTRules};
+use crate::vm::database::MemoryBackingStore;
+use crate::vm::errors::{CheckErrors, Error};
+use crate::vm::tests::{test_clarity_versions, tl_env_factory, TopLevelMemoryEnvironmentGenerator};
+use crate::vm::types::{QualifiedContractIdentifier, Value};
+use crate::vm::{ClarityVersion, ContractContext};
+
+#[apply(test_clarity_versions)]
+fn test_block_height(
+    version: ClarityVersion,
+    epoch: StacksEpochId,
+    mut tl_env_factory: TopLevelMemoryEnvironmentGenerator,
+) {
+    let contract = "(define-read-only (test-func) block-height)";
+
+    let mut placeholder_context =
+        ContractContext::new(QualifiedContractIdentifier::transient(), version);
+
+    let mut owned_env = tl_env_factory.get_env(epoch);
+    let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap();
+
+    let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap();
+    let mut marf = MemoryBackingStore::new();
+    let mut db = marf.as_analysis_db();
+    let analysis = db.execute(|db| {
+        type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version)
+    });
+    if version >= ClarityVersion::Clarity3 {
+        let err = analysis.unwrap_err();
+        assert_eq!(
+            CheckErrors::UndefinedVariable("block-height".to_string()),
+            err.err
+        );
+    } else {
+        assert!(analysis.is_ok());
+    }
+
+    // Initialize the contract
+    // Note that we're ignoring the analysis failure here so that we can test
+    // the runtime behavior. In Clarity 3, if this case somehow gets past the
+    // analysis, it should fail at runtime.
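+    // (Deployment itself is expected to succeed in every version here, since
+    // the body of a read-only function is not evaluated until it is called;
+    // the undefined keyword is only caught when `test-func` runs below.)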
+ let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version >= ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable("block-height".to_string(),)), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[apply(test_clarity_versions)] +fn test_stacks_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let contract = "(define-read-only (test-func) stacks-block-height)"; + + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + if version < ClarityVersion::Clarity3 { + let err = analysis.unwrap_err(); + assert_eq!( + CheckErrors::UndefinedVariable("stacks-block-height".to_string()), + err.err + ); + } else { + assert!(analysis.is_ok()); + } + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. 
+ let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version < ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable( + "stacks-block-height".to_string(), + )), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[apply(test_clarity_versions)] +fn test_tenure_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let contract = "(define-read-only (test-func) tenure-height)"; + + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + if version < ClarityVersion::Clarity3 { + let err = analysis.unwrap_err(); + assert_eq!( + CheckErrors::UndefinedVariable("tenure-height".to_string()), + err.err + ); + } else { + assert!(analysis.is_ok()); + } + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. 
+ let result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + // In Clarity 3, this should trigger a runtime error + if version < ClarityVersion::Clarity3 { + let err = eval_result.unwrap_err(); + assert_eq!( + Error::Unchecked(CheckErrors::UndefinedVariable("tenure-height".to_string(),)), + err + ); + } else { + assert_eq!(Ok(Value::UInt(1)), eval_result); + } +} + +#[derive(Debug, PartialEq)] +enum WhenError { + Analysis, + Initialization, + Runtime, + Never, +} + +#[cfg(test)] +fn expect_contract_error( + version: ClarityVersion, + epoch: StacksEpochId, + tl_env_factory: &mut TopLevelMemoryEnvironmentGenerator, + name: &str, + contract: &str, + expected_errors: &[( + WhenError, + fn(ClarityVersion, StacksEpochId) -> bool, + CheckErrors, + )], + expected_success: Value, +) { + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::local(name).unwrap(), version); + + let mut owned_env = tl_env_factory.get_env(epoch); + let contract_identifier = QualifiedContractIdentifier::local(name).unwrap(); + + let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut marf = MemoryBackingStore::new(); + let mut db = marf.as_analysis_db(); + let analysis = db.execute(|db| { + type_check_version(&contract_identifier, &mut exprs, db, true, epoch, version) + }); + + for (when, err_condition, expected_error) in expected_errors { + if *when == WhenError::Analysis && err_condition(version, epoch) { + let err = analysis.unwrap_err(); + assert_eq!(*expected_error, err.err); + + // Do not continue with the test if the analysis failed. + return; + } + } + + // The type-checker does not report an error for the reuse of the built-in + // name `stacks-block-height`. It is instead caught at initialization. This + // matches the behavior of Clarity 1 and 2. + assert!(analysis.is_ok()); + + // Initialize the contract + // Note that we're ignoring the analysis failure here so that we can test + // the runtime behavior. In Clarity 3, if this case somehow gets past the + // analysis, it should fail at runtime. + let init_result = owned_env.initialize_versioned_contract( + contract_identifier.clone(), + version, + contract, + None, + ASTRules::PrecheckSize, + ); + + for (when, err_condition, expected_error) in expected_errors { + if *when == WhenError::Initialization && err_condition(version, epoch) { + let err = init_result.unwrap_err(); + if let Error::Unchecked(inner_err) = &err { + assert_eq!(expected_error, inner_err); + } else { + panic!("Expected an Unchecked error, but got a different error"); + } + + // Do not continue with the test if the initialization failed. + return; + } + } + + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + // Call the function + let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); + + for (when, err_condition, expected_error) in expected_errors { + if *when == WhenError::Runtime && err_condition(version, epoch) { + let err = eval_result.unwrap_err(); + if let Error::Unchecked(inner_err) = &err { + assert_eq!(expected_error, inner_err); + } else { + panic!("Expected an Unchecked error, but got a different error"); + } + + // Do not continue with the test if the evaluation failed. 
+ return; + } + } + + assert_eq!(Ok(expected_success), eval_result); +} + +#[apply(test_clarity_versions)] +fn reuse_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var block-height uint u1234) + (define-read-only (test-func) + (var-get block-height) + ) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map block-height uint uint) + (define-private (test-func) + (map-insert block-height u1 u2) + ) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((block-height 32)) + block-height + ) + ) + "#, + &[ + ( + WhenError::Runtime, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + (match x + block-height 3 + e 4 + ) + ) + ) + "#, + &[ + ( + WhenError::Runtime, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Int(3), + ); + + // private function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (block-height) true) + (define-private (test-func) (block-height)) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant block-height u1234) + (define-read-only (test-func) block-height) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait block-height ()) + 
(define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // tuple + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get block-height { block-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token block-height) + (define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-non-fungible-token block-height uint) + (define-read-only (test-func) false) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (block-height) (ok true)) + (define-private (test-func) (unwrap-panic (block-height))) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (block-height) true) + (define-private (test-func) (block-height)) + "#, + &[ + ( + WhenError::Initialization, + |version, _| version < ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("block-height".to_string()), + ), + ( + WhenError::Analysis, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::ReservedWord("block-height".to_string()), + ), + ], + Value::Bool(true), + ); +} + +#[apply(test_clarity_versions)] +fn reuse_stacks_block_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var stacks-block-height uint u1234) + (define-read-only (test-func) + (var-get stacks-block-height) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map stacks-block-height uint uint) + (define-private (test-func) + (map-insert stacks-block-height u1 u2) + ) + "#, + &[( + 
WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((stacks-block-height 32)) + stacks-block-height + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + (match x + stacks-block-height 3 + e 4 + ) + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Int(3), + ); + + // function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (stacks-block-height) true) + (define-private (test-func) (stacks-block-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant stacks-block-height u1234) + (define-read-only (test-func) stacks-block-height) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait stacks-block-height ()) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // tuple + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get stacks-block-height { stacks-block-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token stacks-block-height) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-non-fungible-token stacks-block-height uint) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (stacks-block-height) (ok true)) + (define-private (test-func) (unwrap-panic (stacks-block-height))) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], 
+ Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (stacks-block-height) true) + (define-private (test-func) (stacks-block-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("stacks-block-height".to_string()), + )], + Value::Bool(true), + ); +} + +#[apply(test_clarity_versions)] +fn reuse_tenure_height( + version: ClarityVersion, + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { + // data var + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "data-var", + r#" + (define-data-var tenure-height uint u1234) + (define-read-only (test-func) + (var-get tenure-height) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::UInt(1234), + ); + + // map + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "map", + r#" + (define-map tenure-height uint uint) + (define-private (test-func) + (map-insert tenure-height u1 u2) + ) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // let + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "let", + r#" + (define-private (test-func) + (let ((tenure-height 32)) + tenure-height + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Int(32), + ); + + // match binding + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "match-binding", + r#" + (define-read-only (test-func) + (let ((x (if true (ok u5) (err u7)))) + (match x + tenure-height 3 + e 4 + ) + ) + ) + "#, + &[( + WhenError::Runtime, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Int(3), + ); + + // function + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-private (tenure-height) true) + (define-private (test-func) (tenure-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // constant + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "constant", + r#" + (define-constant tenure-height u1234) + (define-read-only (test-func) tenure-height) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::UInt(1234), + ); + + // define-trait + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-trait tenure-height ()) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + + // tuple + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "tuple", + r#" + (define-read-only (test-func) + (get tenure-height { tenure-height: 1234 }) + ) + "#, + &[], + Value::Int(1234), + ); + + // define-fungible-token + expect_contract_error( + version, + 
epoch, + &mut tl_env_factory, + "trait", + r#" + (define-fungible-token tenure-height) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + + // define-non-fungible-token + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "trait", + r#" + (define-non-fungible-token tenure-height uint) + (define-read-only (test-func) false) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(false), + ); + + // define-public + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-public (tenure-height) (ok true)) + (define-private (test-func) (unwrap-panic (tenure-height))) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); + + // define-read-only + expect_contract_error( + version, + epoch, + &mut tl_env_factory, + "function", + r#" + (define-read-only (tenure-height) true) + (define-private (test-func) (tenure-height)) + "#, + &[( + WhenError::Initialization, + |version, _| version >= ClarityVersion::Clarity3, + CheckErrors::NameAlreadyUsed("tenure-height".to_string()), + )], + Value::Bool(true), + ); +} diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs new file mode 100644 index 00000000000..f218b2ccab5 --- /dev/null +++ b/clarity/src/vm/tooling/mod.rs @@ -0,0 +1,60 @@ +use std::collections::{BTreeMap, HashMap, HashSet}; + +use stacks_common::consts::CHAIN_ID_TESTNET; +use stacks_common::types::StacksEpochId; + +use super::analysis::ContractAnalysis; +use super::contexts::GlobalContext; +use super::docs::contracts::ContractRef; +use super::types::TypeSignature; +use super::{eval_all, ClarityVersion, ContractContext, Error as VmError, Value}; +use crate::vm::analysis::{run_analysis, CheckResult}; +use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::costs::LimitedCostTracker; +use crate::vm::database::MemoryBackingStore; +use crate::vm::types::QualifiedContractIdentifier; + +/// Used by CLI tools like the docs generator. 
Not used in production
+pub fn mem_type_check(
+    snippet: &str,
+    version: ClarityVersion,
+    epoch: StacksEpochId,
+) -> CheckResult<(Option<TypeSignature>, ContractAnalysis)> {
+    let contract_identifier = QualifiedContractIdentifier::transient();
+    let mut contract = build_ast_with_rules(
+        &contract_identifier,
+        snippet,
+        &mut (),
+        version,
+        epoch,
+        ASTRules::PrecheckSize,
+    )
+    .unwrap()
+    .expressions;
+
+    let mut marf = MemoryBackingStore::new();
+    let mut analysis_db = marf.as_analysis_db();
+    let cost_tracker = LimitedCostTracker::new_free();
+    match run_analysis(
+        &QualifiedContractIdentifier::transient(),
+        &mut contract,
+        &mut analysis_db,
+        false,
+        cost_tracker,
+        epoch,
+        version,
+        true,
+    ) {
+        Ok(x) => {
+            // return the first type result of the type checker
+            let first_type = x
+                .type_map
+                .as_ref()
+                .unwrap()
+                .get_type_expected(&x.expressions.last().unwrap())
+                .cloned();
+            Ok((first_type, x))
+        }
+        Err((e, _)) => Err(e),
+    }
+}
diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs
index 46734dcc517..e1837ee0343 100644
--- a/clarity/src/vm/types/mod.rs
+++ b/clarity/src/vm/types/mod.rs
@@ -732,6 +732,22 @@ define_named_enum!(BurnBlockInfoProperty {
     PoxAddrs("pox-addrs"),
 });
 
+define_named_enum!(StacksBlockInfoProperty {
+    IndexHeaderHash("id-header-hash"),
+    HeaderHash("header-hash"),
+    Time("time"),
+});
+
+define_named_enum!(TenureInfoProperty {
+    Time("time"),
+    VrfSeed("vrf-seed"),
+    BurnchainHeaderHash("burnchain-header-hash"),
+    MinerAddress("miner-address"),
+    MinerSpendWinner("miner-spend-winner"),
+    MinerSpendTotal("miner-spend-total"),
+    BlockReward("block-reward"),
+});
+
 impl OptionalData {
     pub fn type_signature(&self) -> std::result::Result<TypeSignature, CheckErrors> {
         let type_result = match self.data {
@@ -771,19 +787,6 @@ impl BlockInfoProperty {
             MinerAddress => TypeSignature::PrincipalType,
         }
     }
-
-    pub fn lookup_by_name_at_version(
-        name: &str,
-        version: &ClarityVersion,
-    ) -> Option<BlockInfoProperty> {
-        BlockInfoProperty::lookup_by_name(name).and_then(|native_function| {
-            if &native_function.get_version() <= version {
-                Some(native_function)
-            } else {
-                None
-            }
-        })
-    }
 }
 
 impl BurnBlockInfoProperty {
@@ -819,6 +822,27 @@ impl BurnBlockInfoProperty {
     }
 }
 
+impl StacksBlockInfoProperty {
+    pub fn type_result(&self) -> TypeSignature {
+        use self::StacksBlockInfoProperty::*;
+        match self {
+            Time => TypeSignature::UIntType,
+            IndexHeaderHash | HeaderHash => BUFF_32.clone(),
+        }
+    }
+}
+
+impl TenureInfoProperty {
+    pub fn type_result(&self) -> TypeSignature {
+        use self::TenureInfoProperty::*;
+        match self {
+            Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType,
+            VrfSeed | BurnchainHeaderHash => BUFF_32.clone(),
+            MinerAddress => TypeSignature::PrincipalType,
+        }
+    }
+}
+
 impl PartialEq for ListData {
     fn eq(&self, other: &ListData) -> bool {
         self.data == other.data
@@ -1529,9 +1553,7 @@ impl TupleData {
         self.data_map.is_empty()
     }
 
-    ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case:
-    /// Ok((Default::default()))
-    /// Or keep the skip and remove the comment
+    // TODO: add tests from mutation testing results #4833
     #[cfg_attr(test, mutants::skip)]
     pub fn from_data(data: Vec<(ClarityName, Value)>) -> Result<TupleData> {
         let mut type_map = BTreeMap::new();
@@ -1549,9 +1571,7 @@ impl TupleData {
         Self::new(TupleTypeSignature::try_from(type_map)?, data_map)
     }
 
-    ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case:
-    /// Ok((Default::default()))
-    /// Or keep the skip and
remove the comment
+    // TODO: add tests from mutation testing results #4834
     #[cfg_attr(test, mutants::skip)]
     pub fn from_data_typed(
         epoch: &StacksEpochId,
diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs
index c9971f97aeb..280258e0266 100644
--- a/clarity/src/vm/types/signatures.rs
+++ b/clarity/src/vm/types/signatures.rs
@@ -15,8 +15,10 @@ // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::collections::btree_map::Entry;
-use std::collections::{hash_map, BTreeMap, HashMap};
+use std::collections::{hash_map, BTreeMap};
 use std::hash::{Hash, Hasher};
+use std::ops::Deref;
+use std::sync::Arc;
 use std::{cmp, fmt};
 
 // TypeSignatures
@@ -76,7 +78,36 @@ impl AssetIdentifier {
 
 #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct TupleTypeSignature {
-    type_map: HashMap<ClarityName, TypeSignature>,
+    #[serde(with = "tuple_type_map_serde")]
+    type_map: Arc<BTreeMap<ClarityName, TypeSignature>>,
+}
+
+mod tuple_type_map_serde {
+    use std::collections::BTreeMap;
+    use std::ops::Deref;
+    use std::sync::Arc;
+
+    use serde::{Deserializer, Serializer};
+
+    use super::TypeSignature;
+    use crate::vm::ClarityName;
+
+    pub fn serialize<S: Serializer>(
+        map: &Arc<BTreeMap<ClarityName, TypeSignature>>,
+        ser: S,
+    ) -> Result<S::Ok, S::Error> {
+        serde::Serialize::serialize(map.deref(), ser)
+    }
+
+    pub fn deserialize<'de, D>(
+        deser: D,
+    ) -> Result<Arc<BTreeMap<ClarityName, TypeSignature>>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let map = serde::Deserialize::deserialize(deser)?;
+        Ok(Arc::new(map))
+    }
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
@@ -787,12 +818,12 @@ impl TypeSignature {
                 inner_type.1.canonicalize_v2_1(),
             ))),
             TupleType(ref tuple_sig) => {
-                let mut canonicalized_fields = HashMap::new();
+                let mut canonicalized_fields = BTreeMap::new();
                 for (field_name, field_type) in tuple_sig.get_type_map() {
                     canonicalized_fields.insert(field_name.clone(), field_type.canonicalize_v2_1());
                 }
                 TypeSignature::from(TupleTypeSignature {
-                    type_map: canonicalized_fields,
+                    type_map: Arc::new(canonicalized_fields),
                 })
             }
             TraitReferenceType(trait_id) => CallableType(CallableSubtype::Trait(trait_id.clone())),
@@ -851,9 +882,9 @@ impl TryFrom<Vec<(ClarityName, TypeSignature)>> for TupleTypeSignature {
             return Err(CheckErrors::EmptyTuplesNotAllowed);
         }
 
-        let mut type_map = HashMap::new();
+        let mut type_map = BTreeMap::new();
         for (name, type_info) in type_data.into_iter() {
-            if let hash_map::Entry::Vacant(e) = type_map.entry(name.clone()) {
+            if let Entry::Vacant(e) = type_map.entry(name.clone()) {
                 e.insert(type_info);
             } else {
                 return Err(CheckErrors::NameAlreadyUsed(name.into()));
@@ -874,30 +905,7 @@ impl TryFrom<Vec<(ClarityName, TypeSignature)>> for TupleTypeSignature {
                 return Err(CheckErrors::TypeSignatureTooDeep);
             }
         }
-        let type_map = type_map.into_iter().collect();
-        let result = TupleTypeSignature { type_map };
-        let would_be_size = result
-            .inner_size()?
-            .ok_or_else(|| CheckErrors::ValueTooLarge)?;
-        if would_be_size > MAX_VALUE_SIZE {
-            Err(CheckErrors::ValueTooLarge)
-        } else {
-            Ok(result)
-        }
-    }
-}
-
-impl TryFrom<HashMap<ClarityName, TypeSignature>> for TupleTypeSignature {
-    type Error = CheckErrors;
-    fn try_from(type_map: HashMap<ClarityName, TypeSignature>) -> Result<TupleTypeSignature, CheckErrors> {
-        if type_map.is_empty() {
-            return Err(CheckErrors::EmptyTuplesNotAllowed);
-        }
-        for child_sig in type_map.values() {
-            if (1 + child_sig.depth()) > MAX_TYPE_DEPTH {
-                return Err(CheckErrors::TypeSignatureTooDeep);
-            }
-        }
+        let type_map = Arc::new(type_map.into_iter().collect());
         let result = TupleTypeSignature { type_map };
         let would_be_size = result
             .inner_size()?
@@ -925,7 +933,7 @@ impl TupleTypeSignature {
         self.type_map.get(field)
     }
 
-    pub fn get_type_map(&self) -> &HashMap<ClarityName, TypeSignature> {
+    pub fn get_type_map(&self) -> &BTreeMap<ClarityName, TypeSignature> {
         &self.type_map
     }
 
@@ -961,7 +969,7 @@ impl TupleTypeSignature {
     }
 
     pub fn shallow_merge(&mut self, update: &mut TupleTypeSignature) {
-        self.type_map.extend(update.type_map.drain());
+        Arc::make_mut(&mut self.type_map).append(Arc::make_mut(&mut update.type_map));
     }
 }
 
@@ -1648,7 +1656,9 @@ impl TypeSignature {
         clarity_version: ClarityVersion,
     ) -> Result<BTreeMap<ClarityName, FunctionSignature>> {
         let mut trait_signature: BTreeMap<ClarityName, FunctionSignature> = BTreeMap::new();
-        let functions_types = type_args[0]
+        let functions_types = type_args
+            .get(0)
+            .ok_or_else(|| CheckErrors::InvalidTypeDescription)?
             .match_list()
             .ok_or(CheckErrors::DefineTraitBadSignature)?;
diff --git a/clarity/src/vm/variables.rs b/clarity/src/vm/variables.rs
index 539e14c39e4..a5947d00cd2 100644
--- a/clarity/src/vm/variables.rs
+++ b/clarity/src/vm/variables.rs
@@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+use stacks_common::types::StacksEpochId;
+
 use super::errors::InterpreterError;
 use crate::vm::contexts::{Environment, LocalContext};
 use crate::vm::costs::cost_functions::ClarityCostFunction;
@@ -22,36 +24,23 @@ use crate::vm::errors::{InterpreterResult as Result, RuntimeErrorType};
 use crate::vm::types::{BuffData, Value};
 use crate::vm::ClarityVersion;
 
-define_versioned_named_enum!(NativeVariables(ClarityVersion) {
-    ContractCaller("contract-caller", ClarityVersion::Clarity1),
-    TxSender("tx-sender", ClarityVersion::Clarity1),
-    BlockHeight("block-height", ClarityVersion::Clarity1),
-    BurnBlockHeight("burn-block-height", ClarityVersion::Clarity1),
-    NativeNone("none", ClarityVersion::Clarity1),
-    NativeTrue("true", ClarityVersion::Clarity1),
-    NativeFalse("false", ClarityVersion::Clarity1),
-    TotalLiquidMicroSTX("stx-liquid-supply", ClarityVersion::Clarity1),
-    Regtest("is-in-regtest", ClarityVersion::Clarity1),
-    TxSponsor("tx-sponsor?", ClarityVersion::Clarity2),
-    Mainnet("is-in-mainnet", ClarityVersion::Clarity2),
-    ChainId("chain-id", ClarityVersion::Clarity2),
+define_versioned_named_enum_with_max!(NativeVariables(ClarityVersion) {
+    ContractCaller("contract-caller", ClarityVersion::Clarity1, None),
+    TxSender("tx-sender", ClarityVersion::Clarity1, None),
+    BlockHeight("block-height", ClarityVersion::Clarity1, Some(ClarityVersion::Clarity2)),
+    BurnBlockHeight("burn-block-height", ClarityVersion::Clarity1, None),
+    NativeNone("none", ClarityVersion::Clarity1, None),
+    NativeTrue("true", ClarityVersion::Clarity1, None),
+    NativeFalse("false", ClarityVersion::Clarity1, None),
+    TotalLiquidMicroSTX("stx-liquid-supply", ClarityVersion::Clarity1, None),
+    Regtest("is-in-regtest", ClarityVersion::Clarity1, None),
+    TxSponsor("tx-sponsor?", ClarityVersion::Clarity2, None),
+    Mainnet("is-in-mainnet", ClarityVersion::Clarity2, None),
+    ChainId("chain-id", ClarityVersion::Clarity2, None),
+    StacksBlockHeight("stacks-block-height", ClarityVersion::Clarity3, None),
+    TenureHeight("tenure-height", ClarityVersion::Clarity3, None),
 });
 
-impl NativeVariables {
-    pub fn lookup_by_name_at_version(
-        name: &str,
-        version: &ClarityVersion,
-    ) -> Option<NativeVariables> {
-        NativeVariables::lookup_by_name(name).and_then(|native_function| {
-            if &native_function.get_version() <= version {
-                Some(native_function)
-            } else {
-                None
-            }
-        })
-    }
-}
-
 pub fn is_reserved_name(name: &str, version: &ClarityVersion) -> bool {
NativeVariables::lookup_by_name_at_version(name, version).is_some() } @@ -92,8 +81,19 @@ pub fn lookup_reserved_variable( } NativeVariables::BlockHeight => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; - let block_height = env.global_context.database.get_current_block_height(); - Ok(Some(Value::UInt(block_height as u128))) + // In epoch 2.x, the `block-height` keyword returns the Stacks block height. + // For Clarity 1 and Clarity 2 contracts executing in epoch 3, `block-height` + // is equal to the tenure height instead of the Stacks block height. This change + // is made to maintain a similar pace at which this value increments (e.g. for use + // as an expiration). In Clarity 3, `block-height` is removed to avoid confusion. + // It is replaced with two new keywords: `stacks-block-height` and `tenure-height`. + if env.global_context.epoch_id < StacksEpochId::Epoch30 { + let block_height = env.global_context.database.get_current_block_height(); + Ok(Some(Value::UInt(block_height as u128))) + } else { + let tenure_height = env.global_context.database.get_tenure_height()?; + Ok(Some(Value::UInt(tenure_height as u128))) + } } NativeVariables::BurnBlockHeight => { runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; @@ -123,6 +123,16 @@ pub fn lookup_reserved_variable( let chain_id = env.global_context.chain_id; Ok(Some(Value::UInt(chain_id.into()))) } + NativeVariables::StacksBlockHeight => { + runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; + let block_height = env.global_context.database.get_current_block_height(); + Ok(Some(Value::UInt(block_height as u128))) + } + NativeVariables::TenureHeight => { + runtime_cost(ClarityCostFunction::FetchVar, env, 1)?; + let tenure_height = env.global_context.database.get_tenure_height()?; + Ok(Some(Value::UInt(tenure_height as u128))) + } } } else { Ok(None) diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index f64d4ee8784..4c437d52ccc 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -9,6 +9,7 @@ use crate::vm::errors::{Error, RuntimeErrorType}; pub enum ClarityVersion { Clarity1, Clarity2, + Clarity3, } impl fmt::Display for ClarityVersion { @@ -16,13 +17,14 @@ impl fmt::Display for ClarityVersion { match self { ClarityVersion::Clarity1 => write!(f, "Clarity 1"), ClarityVersion::Clarity2 => write!(f, "Clarity 2"), + ClarityVersion::Clarity3 => write!(f, "Clarity 3"), } } } impl ClarityVersion { pub fn latest() -> ClarityVersion { - ClarityVersion::Clarity2 + ClarityVersion::Clarity3 } pub fn default_for_epoch(epoch_id: StacksEpochId) -> ClarityVersion { match epoch_id { @@ -37,7 +39,7 @@ impl ClarityVersion { StacksEpochId::Epoch23 => ClarityVersion::Clarity2, StacksEpochId::Epoch24 => ClarityVersion::Clarity2, StacksEpochId::Epoch25 => ClarityVersion::Clarity2, - StacksEpochId::Epoch30 => ClarityVersion::Clarity2, + StacksEpochId::Epoch30 => ClarityVersion::Clarity3, } } } @@ -51,9 +53,12 @@ impl FromStr for ClarityVersion { Ok(ClarityVersion::Clarity1) } else if s == "clarity2" { Ok(ClarityVersion::Clarity2) + } else if s == "clarity3" { + Ok(ClarityVersion::Clarity3) } else { Err(RuntimeErrorType::ParseError( - "Invalid clarity version. Valid versions are: Clarity1, Clarity2.".to_string(), + "Invalid clarity version. Valid versions are: Clarity1, Clarity2, Clarity3." 
diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs
index f64d4ee8784..4c437d52ccc 100644
--- a/clarity/src/vm/version.rs
+++ b/clarity/src/vm/version.rs
@@ -9,6 +9,7 @@ use crate::vm::errors::{Error, RuntimeErrorType};
 pub enum ClarityVersion {
     Clarity1,
     Clarity2,
+    Clarity3,
 }
 
 impl fmt::Display for ClarityVersion {
@@ -16,13 +17,14 @@ impl fmt::Display for ClarityVersion {
         match self {
             ClarityVersion::Clarity1 => write!(f, "Clarity 1"),
             ClarityVersion::Clarity2 => write!(f, "Clarity 2"),
+            ClarityVersion::Clarity3 => write!(f, "Clarity 3"),
         }
     }
 }
 
 impl ClarityVersion {
     pub fn latest() -> ClarityVersion {
-        ClarityVersion::Clarity2
+        ClarityVersion::Clarity3
     }
     pub fn default_for_epoch(epoch_id: StacksEpochId) -> ClarityVersion {
         match epoch_id {
@@ -37,7 +39,7 @@ impl ClarityVersion {
             StacksEpochId::Epoch23 => ClarityVersion::Clarity2,
             StacksEpochId::Epoch24 => ClarityVersion::Clarity2,
             StacksEpochId::Epoch25 => ClarityVersion::Clarity2,
-            StacksEpochId::Epoch30 => ClarityVersion::Clarity2,
+            StacksEpochId::Epoch30 => ClarityVersion::Clarity3,
         }
     }
 }
@@ -51,9 +53,12 @@ impl FromStr for ClarityVersion {
             Ok(ClarityVersion::Clarity1)
         } else if s == "clarity2" {
             Ok(ClarityVersion::Clarity2)
+        } else if s == "clarity3" {
+            Ok(ClarityVersion::Clarity3)
         } else {
             Err(RuntimeErrorType::ParseError(
-                "Invalid clarity version. Valid versions are: Clarity1, Clarity2.".to_string(),
+                "Invalid clarity version. Valid versions are: Clarity1, Clarity2, Clarity3."
+                    .to_string(),
             )
             .into())
         }
     }
 }
diff --git a/contrib/boot-contracts-stateful-prop-tests/.gitignore b/contrib/boot-contracts-stateful-prop-tests/.gitignore
new file mode 100644
index 00000000000..393158bd1c0
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/.gitignore
@@ -0,0 +1,8 @@
+logs
+*.log
+npm-debug.log*
+coverage
+*.info
+costs-reports.json
+node_modules
+history.txt
diff --git a/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml b/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml
new file mode 100644
index 00000000000..f0d404a755c
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/Clarinet.toml
@@ -0,0 +1,2 @@
+[project]
+name = "boot-contracts-stateful-prop-tests"
diff --git a/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml b/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml
new file mode 100644
index 00000000000..1837aee68a4
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/deployments/default.simnet-plan.yaml
@@ -0,0 +1,52 @@
+---
+id: 0
+name: "Simulated deployment, used as a default for `clarinet console`, `clarinet test` and `clarinet check`"
+network: simnet
+genesis:
+  wallets:
+    - name: deployer
+      address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM
+      balance: "100000000000000"
+    - name: wallet_1
+      address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5
+      balance: "100000000000000"
+    - name: wallet_2
+      address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG
+      balance: "100000000000000"
+    - name: wallet_3
+      address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC
+      balance: "100000000000000"
+    - name: wallet_4
+      address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND
+      balance: "100000000000000"
+    - name: wallet_5
+      address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB
+      balance: "100000000000000"
+    - name: wallet_6
+      address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0
+      balance: "100000000000000"
+    - name: wallet_7
+      address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ
+      balance: "100000000000000"
+    - name: wallet_8
+      address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP
+      balance: "100000000000000"
+    - name: wallet_9
+      address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6
+      balance: "100000000000000"
+  contracts:
+    - costs
+    - pox
+    - pox-2
+    - pox-3
+    - pox-4
+    - lockup
+    - costs-2
+    - costs-3
+    - cost-voting
+    - bns
+plan:
+  batches:
+    - id: 0
+      transactions: []
+      epoch: "2.4"
diff --git a/contrib/boot-contracts-stateful-prop-tests/package-lock.json b/contrib/boot-contracts-stateful-prop-tests/package-lock.json
new file mode 100644
index 00000000000..e3040db2e21
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/package-lock.json
@@ -0,0 +1,2394 @@
+{ + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "fast-check": "^3.18.0", + "typescript": "^5.4.5", + "vite": "^5.2.10", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", + "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os":
[ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", + "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", + "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", + "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", + "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", + "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", + "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", + "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", + "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", + "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.20.2", + 
"resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", + "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", + "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", + "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", + "integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", + "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", + "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", + "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", + "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", + "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", + "integrity": 
"sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", + "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", + "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", + "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@hirosystems/clarinet-sdk": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-2.6.0.tgz", + "integrity": "sha512-8qyvpaeTmhn/Lrsg7zjNpIr9Ova1zVfzMNeBC4+y42tqxHX0j6MM58nr5m56bz5/0u+KPOvQpAhuVxGR27/NiA==", + "dependencies": { + "@hirosystems/clarinet-sdk-wasm": "^2.6.0", + "@stacks/encryption": "^6.13.0", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.13.0", + "@stacks/transactions": "^6.13.0", + "kolorist": "^1.8.0", + "prompts": "^2.4.2", + "vitest": "^1.0.4", + "yargs": "^17.7.2" + }, + "bin": { + "clarinet-sdk": "dist/cjs/bin/index.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@hirosystems/clarinet-sdk-wasm": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.6.0.tgz", + "integrity": "sha512-cUpYrnLX4VnpnumlYTCUNf1gFfl2kL18q63C1qFzUzkjFszffR+x0U2lxOQrz3EY3/U6eWeZvZPdKbOFO3zgqQ==" + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@noble/hashes": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.1.5.tgz", + "integrity": "sha512-LTMZiiLc+V4v1Yi16TD6aX2gmtKszNye0pQgbaLqkvhIqP7nVsSaJsWloGQjJfJ8offaoP5GtX3yY5swbcJxxQ==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@noble/secp256k1": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.7.1.tgz", + "integrity": 
"sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.17.1.tgz", + "integrity": "sha512-P6Wg856Ou/DLpR+O0ZLneNmrv7QpqBg+hK4wE05ijbC/t349BRfMfx+UFj5Ha3fCFopIa6iSZlpdaB4agkWp2Q==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.17.1.tgz", + "integrity": "sha512-piwZDjuW2WiHr05djVdUkrG5JbjnGbtx8BXQchYCMfib/nhjzWoiScelZ+s5IJI7lecrwSxHCzW026MWBL+oJQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.17.1.tgz", + "integrity": "sha512-LsZXXIsN5Q460cKDT4Y+bzoPDhBmO5DTr7wP80d+2EnYlxSgkwdPfE3hbE+Fk8dtya+8092N9srjBTJ0di8RIA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.17.1.tgz", + "integrity": "sha512-S7TYNQpWXB9APkxu/SLmYHezWwCoZRA9QLgrDeml+SR2A1LLPD2DBUdUlvmCF7FUpRMKvbeeWky+iizQj65Etw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.17.1.tgz", + "integrity": "sha512-Lq2JR5a5jsA5um2ZoLiXXEaOagnVyCpCW7xvlcqHC7y46tLwTEgUSTM3a2TfmmTMmdqv+jknUioWXlmxYxE9Yw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.17.1.tgz", + "integrity": "sha512-9BfzwyPNV0IizQoR+5HTNBGkh1KXE8BqU0DBkqMngmyFW7BfuIZyMjQ0s6igJEiPSBvT3ZcnIFohZ19OqjhDPg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.17.1.tgz", + "integrity": "sha512-e2uWaoxo/rtzA52OifrTSXTvJhAXb0XeRkz4CdHBK2KtxrFmuU/uNd544Ogkpu938BzEfvmWs8NZ8Axhw33FDw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.17.1.tgz", + "integrity": "sha512-ekggix/Bc/d/60H1Mi4YeYb/7dbal1kEDZ6sIFVAE8pUSx7PiWeEh+NWbL7bGu0X68BBIkgF3ibRJe1oFTksQQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.17.1.tgz", + "integrity": "sha512-UGV0dUo/xCv4pkr/C8KY7XLFwBNnvladt8q+VmdKrw/3RUd3rD0TptwjisvE2TTnnlENtuY4/PZuoOYRiGp8Gw==", + "cpu": [ + "ppc64" + ], + "optional": true, + 
"os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.17.1.tgz", + "integrity": "sha512-gEYmYYHaehdvX46mwXrU49vD6Euf1Bxhq9pPb82cbUU9UT2NV+RSckQ5tKWOnNXZixKsy8/cPGtiUWqzPuAcXQ==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.17.1.tgz", + "integrity": "sha512-xeae5pMAxHFp6yX5vajInG2toST5lsCTrckSRUFwNgzYqnUjNBcQyqk1bXUxX5yhjWFl2Mnz3F8vQjl+2FRIcw==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.17.1.tgz", + "integrity": "sha512-AsdnINQoDWfKpBzCPqQWxSPdAWzSgnYbrJYtn6W0H2E9It5bZss99PiLA8CgmDRfvKygt20UpZ3xkhFlIfX9zQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.17.1.tgz", + "integrity": "sha512-KoB4fyKXTR+wYENkIG3fFF+5G6N4GFvzYx8Jax8BR4vmddtuqSb5oQmYu2Uu067vT/Fod7gxeQYKupm8gAcMSQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.17.1.tgz", + "integrity": "sha512-J0d3NVNf7wBL9t4blCNat+d0PYqAx8wOoY+/9Q5cujnafbX7BmtYk3XvzkqLmFECaWvXGLuHmKj/wrILUinmQg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.17.1.tgz", + "integrity": "sha512-xjgkWUwlq7IbgJSIxvl516FJ2iuC/7ttjsAxSPpC9kkI5iQQFHKyEN5BjbhvJ/IXIZ3yIBcW5QDlWAyrA+TFag==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.17.1.tgz", + "integrity": "sha512-0QbCkfk6cnnVKWqqlC0cUrrUMDMfu5ffvYMTUHf+qMN2uAb3MKP31LPcwiMXBNsvoFGs/kYdFOsuLmvppCopXA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scure/base": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.6.tgz", + "integrity": "sha512-ok9AWwhcgYuGG3Zfhyqg+zwl+Wn5uE+dwC0NV/2qQkx4dABbb/bx96vWu8NSj+BNjjSjno+JRYRjle1jV08k3g==", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.1.0.tgz", + "integrity": "sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "@noble/hashes": "~1.1.1", + "@scure/base": "~1.1.0" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": 
"sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@stacks/common": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", + "dependencies": { + "@types/bn.js": "^5.1.0", + "@types/node": "^18.0.4" + } + }, + "node_modules/@stacks/encryption": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@scure/bip39": "1.1.0", + "@stacks/common": "^6.13.0", + "@types/node": "^18.0.4", + "base64-js": "^1.5.1", + "bs58": "^5.0.0", + "ripemd160-min": "^0.0.6", + "varuint-bitcoin": "^1.1.2" + } + }, + "node_modules/@stacks/network": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", + "dependencies": { + "@stacks/common": "^6.13.0", + "cross-fetch": "^3.1.5" + } + }, + "node_modules/@stacks/stacking": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.14.0.tgz", + "integrity": "sha512-P6ITXYpb5q4hgWMPimJW84mih3hQuQ0ko7AcnJ4SPy17nt1rxEz7/zgyRnqg1Lc18zt4HqfF9SKM7+Sqt/EMZA==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@scure/base": "1.1.1", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacks-blockchain-api-types": "^0.61.0", + "@stacks/transactions": "^6.13.1", + "bs58": "^5.0.0" + } + }, + "node_modules/@stacks/stacking/node_modules/@scure/base": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz", + "integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@stacks/stacks-blockchain-api-types": { + "version": "0.61.0", + "resolved": "https://registry.npmjs.org/@stacks/stacks-blockchain-api-types/-/stacks-blockchain-api-types-0.61.0.tgz", + "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" + }, + "node_modules/@stacks/transactions": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@stacks/common": "^6.13.0", + "@stacks/network": "^6.13.0", + "c32check": "^2.0.0", + "lodash.clonedeep": "^4.5.0" + } + }, + "node_modules/@types/bn.js": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.5.tgz", + "integrity": "sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": 
"sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/node": { + "version": "18.19.31", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", + "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@vitest/expect": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.5.2.tgz", + "integrity": "sha512-rf7MTD1WCoDlN3FfYJ9Llfp0PbdtOMZ3FIF0AVkDnKbp3oiMW1c8AmvRZBcqbAhDUAvF52e9zx4WQM1r3oraVA==", + "dependencies": { + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.5.2.tgz", + "integrity": "sha512-7IJ7sJhMZrqx7HIEpv3WrMYcq8ZNz9L6alo81Y6f8hV5mIE6yVZsFoivLZmr0D777klm1ReqonE9LyChdcmw6g==", + "dependencies": { + "@vitest/utils": "1.5.2", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.5.2.tgz", + "integrity": "sha512-CTEp/lTYos8fuCc9+Z55Ga5NVPKUgExritjF5VY7heRFUfheoAqBneUlvXSUJHUZPjnPmyZA96yLRJDP1QATFQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.5.2.tgz", + "integrity": "sha512-xCcPvI8JpCtgikT9nLpHPL1/81AYqZy1GCy4+MCHBE7xi8jgsYkULpW5hrx5PGLgOQjUpb6fd15lqcriJ40tfQ==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.5.2.tgz", + "integrity": "sha512-sWOmyofuXLJ85VvXNsroZur7mOJGiQeM0JN3/0D1uU8U9bGFM69X1iqHaRXl6R8BwaLY6yPCogP257zxTzkUdA==", + "dependencies": { + "diff-sequences": "^29.6.3", + "estree-walker": "^3.0.3", + "loupe": "^2.3.7", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": 
"sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "engines": { + "node": "*" + } + }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/bs58": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/bs58/-/bs58-5.0.0.tgz", + "integrity": "sha512-r+ihvQJvahgYT50JD05dyJNKlmmSlMoOGwn1lCcEzanPglg7TxYjioQUYehQ9mAR/+hOSd2jRc/Z2y5UxBymvQ==", + "dependencies": { + "base-x": "^4.0.0" + } + }, + "node_modules/c32check": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/c32check/-/c32check-2.0.0.tgz", + "integrity": "sha512-rpwfAcS/CMqo0oCqDf3r9eeLgScRE3l/xHDCXhM3UyrfvIn7PrLq63uHh7yYbv8NzaZn5MVsVhIRpQ+5GZ5HyA==", + "dependencies": { + "@noble/hashes": "^1.1.2", + "base-x": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chai": { + "version": "4.4.1", + "resolved": 
"https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", + "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "dependencies": { + "assertion-error": "^1.1.0", + "check-error": "^1.0.3", + "deep-eql": "^4.1.3", + "get-func-name": "^2.0.2", + "loupe": "^2.3.6", + "pathval": "^1.1.1", + "type-detect": "^4.0.8" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/check-error": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", + "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", + "dependencies": { + "get-func-name": "^2.0.2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar-cli": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chokidar-cli/-/chokidar-cli-3.0.0.tgz", + "integrity": "sha512-xVW+Qeh7z15uZRxHOkP93Ux8A0xbPzwK4GaqD8dQOYc34TlkqUhVSS59fK36DOp5WdJlrRzlYSy02Ht99FjZqQ==", + "dependencies": { + "chokidar": "^3.5.2", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "yargs": "^13.3.0" + }, + "bin": { + "chokidar": "index.js" + }, + "engines": { + "node": ">= 8.10.0" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-regex": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dependencies": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/chokidar-cli/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "node_modules/chokidar-cli/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/string-width": 
{ + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + }, + "node_modules/chokidar-cli/node_modules/yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dependencies": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + } + }, + "node_modules/chokidar-cli/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==" + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + 
"resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-eql": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "dependencies": { + "type-detect": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/esbuild": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", + "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.20.2", + "@esbuild/android-arm": "0.20.2", + "@esbuild/android-arm64": "0.20.2", + "@esbuild/android-x64": "0.20.2", + "@esbuild/darwin-arm64": "0.20.2", + "@esbuild/darwin-x64": "0.20.2", + "@esbuild/freebsd-arm64": "0.20.2", + "@esbuild/freebsd-x64": "0.20.2", + "@esbuild/linux-arm": "0.20.2", + "@esbuild/linux-arm64": "0.20.2", + "@esbuild/linux-ia32": "0.20.2", + "@esbuild/linux-loong64": "0.20.2", + "@esbuild/linux-mips64el": "0.20.2", + "@esbuild/linux-ppc64": "0.20.2", + "@esbuild/linux-riscv64": "0.20.2", + "@esbuild/linux-s390x": "0.20.2", + "@esbuild/linux-x64": "0.20.2", + "@esbuild/netbsd-x64": "0.20.2", + "@esbuild/openbsd-x64": "0.20.2", + "@esbuild/sunos-x64": "0.20.2", + "@esbuild/win32-arm64": "0.20.2", + "@esbuild/win32-ia32": "0.20.2", + "@esbuild/win32-x64": "0.20.2" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fast-check": { + "version": "3.18.0", + "resolved": "https://registry.npmjs.org/fast-check/-/fast-check-3.18.0.tgz", + "integrity": "sha512-/951xaT0kA40w0GXRsZXEwSTE7LugjZtSA/8vPgFkiPQ8wNp8tRvqWuNDHBgLxJYXtsK11e/7Q4ObkKW5BdTFQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "dependencies": { + "pure-rand": "^6.1.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-func-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", + "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", + "engines": { + "node": "*" + } + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": 
"sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/js-tokens": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.0.tgz", + "integrity": "sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ==" + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": 
">=6" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==" + }, + "node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==" + }, + "node_modules/loupe": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", + "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", + "dependencies": { + "get-func-name": "^2.0.1" + } + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mlly": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", + "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "dependencies": { + "acorn": "^8.11.3", + "pathe": "^1.1.2", + "pkg-types": "^1.0.3", + "ufo": "^1.3.2" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": 
"^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==" + }, + "node_modules/pathval": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", + "engines": { + "node": "*" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.0.tgz", + "integrity": "sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA==", + "dependencies": { + "confbox": "^0.1.7", + "mlly": "^1.6.1", + "pathe": "^1.1.2" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + 
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + }, + "node_modules/ripemd160-min": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/ripemd160-min/-/ripemd160-min-0.0.6.tgz", + "integrity": "sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup": { + "version": "4.17.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.17.1.tgz", + "integrity": "sha512-0gG94inrUtg25sB2V/pApwiv1lUb0bQ25FPNuzO89Baa+B+c0ccaaBKM5zkZV/12pUUdH+lWCSm9wmHqyocuVQ==", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.17.1", + "@rollup/rollup-android-arm64": "4.17.1", + "@rollup/rollup-darwin-arm64": "4.17.1", + "@rollup/rollup-darwin-x64": "4.17.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.17.1", + "@rollup/rollup-linux-arm-musleabihf": "4.17.1", + "@rollup/rollup-linux-arm64-gnu": "4.17.1", + "@rollup/rollup-linux-arm64-musl": "4.17.1", + "@rollup/rollup-linux-powerpc64le-gnu": "4.17.1", + "@rollup/rollup-linux-riscv64-gnu": "4.17.1", + "@rollup/rollup-linux-s390x-gnu": "4.17.1", + "@rollup/rollup-linux-x64-gnu": "4.17.1", + "@rollup/rollup-linux-x64-musl": "4.17.1", + "@rollup/rollup-win32-arm64-msvc": "4.17.1", + "@rollup/rollup-win32-ia32-msvc": "4.17.1", + "@rollup/rollup-win32-x64-msvc": "4.17.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==" + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + 
"ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.0.tgz", + "integrity": "sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==", + "dependencies": { + "js-tokens": "^9.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/tinybench": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==" + }, + "node_modules/tinypool": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz", + "integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", + "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/typescript": { + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", + "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/varuint-bitcoin": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/varuint-bitcoin/-/varuint-bitcoin-1.1.2.tgz", + "integrity": 
"sha512-4EVb+w4rx+YfVM32HQX42AbbT7/1f5zwAYhIujKXKk8NQK+JfRVl3pqT3hjNn/L+RstigmGGKVwHA/P0wgITZw==", + "dependencies": { + "safe-buffer": "^5.1.1" + } + }, + "node_modules/vite": { + "version": "5.2.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz", + "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==", + "dependencies": { + "esbuild": "^0.20.1", + "postcss": "^8.4.38", + "rollup": "^4.13.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.5.2.tgz", + "integrity": "sha512-Y8p91kz9zU+bWtF7HGt6DVw2JbhyuB2RlZix3FPYAYmUyZ3n7iTp8eSyLyY6sxtPegvxQtmlTMhfPhUfCUF93A==", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.5.2.tgz", + "integrity": "sha512-l9gwIkq16ug3xY7BxHwcBQovLZG75zZL0PlsiYQbf76Rz6QGs54416UWMtC0jXeihvHvcHrf2ROEjkQRVpoZYw==", + "dependencies": { + "@vitest/expect": "1.5.2", + "@vitest/runner": "1.5.2", + "@vitest/snapshot": "1.5.2", + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "acorn-walk": "^8.3.2", + "chai": "^4.3.10", + "debug": "^4.3.4", + "execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^2.0.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.3", + "vite": "^5.0.0", + "vite-node": "1.5.2", + "why-is-node-running": "^2.2.2" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "1.5.2", + "@vitest/ui": "1.5.2", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest-environment-clarinet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-2.1.0.tgz", + "integrity": "sha512-1SA9XZh47qmbV724sGo2FyjVU+Ar3m5TOU4bLGSlWDb/x388IKUPrHbHWqIQNwY+gwEm9VBfXEAd1LOSUdemBw==", + "peerDependencies": { + "@hirosystems/clarinet-sdk": ">=2.6.0", + "vitest": "^1.5.2" + } + }, + 
"node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" + }, + "node_modules/why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/package.json b/contrib/boot-contracts-stateful-prop-tests/package.json new file mode 100644 index 00000000000..89e3da95b00 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/package.json @@ -0,0 +1,23 @@ +{ + "name": "boot-contracts-stateful-prop-tests", + "version": "1.0.0", + "description": "Run stateful property-based tests on this project.", + "private": true, + "type": "module", + "scripts": { + "test": "vitest run" + }, + "author": "", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "fast-check": "^3.18.0", + "typescript": "^5.4.5", + "vite": "^5.2.10", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml b/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml new file mode 100644 index 00000000000..bb941fddc90 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/settings/Devnet.toml @@ -0,0 +1,73 @@ +[network] +name = "devnet" + +[accounts.deployer] +mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +balance = 100_000_000_000_000 +# secret_key: 753b7cc01a1a2e86221266a154af739463fce51219d97e4f856cd7200c3bd2a601 +# stx_address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM +# btc_address: mqVnk6NPRdhntvfm4hh9vvjiRkFDUuSYsH + +[accounts.wallet_1] +mnemonic = "sell invite acquire kitten bamboo drastic jelly vivid peace spawn twice guilt pave pen trash pretty park cube fragile unaware remain midnight betray rebuild" +balance = 100_000_000_000_000 +# secret_key: 7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801 +# stx_address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 +# btc_address: mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC + +[accounts.wallet_2] +mnemonic = "hold excess usual excess ring elephant install account glad dry fragile donkey gaze humble truck breeze nation gasp vacuum limb head keep delay hospital" +balance = 100_000_000_000_000 +# secret_key: 530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101 +# stx_address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG +# btc_address: muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG + +[accounts.wallet_3] +mnemonic = "cycle puppy glare enroll cost 
improve round trend wrist mushroom scorpion tower claim oppose clever elephant dinosaur eight problem before frozen dune wagon high" +balance = 100_000_000_000_000 +# secret_key: d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901 +# stx_address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC +# btc_address: mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7 + +[accounts.wallet_4] +mnemonic = "board list obtain sugar hour worth raven scout denial thunder horse logic fury scorpion fold genuine phrase wealth news aim below celery when cabin" +balance = 100_000_000_000_000 +# secret_key: f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701 +# stx_address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND +# btc_address: mg1C76bNTutiCDV3t9nWhZs3Dc8LzUufj8 + +[accounts.wallet_5] +mnemonic = "hurry aunt blame peanut heavy update captain human rice crime juice adult scale device promote vast project quiz unit note reform update climb purchase" +balance = 100_000_000_000_000 +# secret_key: 3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801 +# stx_address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB +# btc_address: mweN5WVqadScHdA81aATSdcVr4B6dNokqx + +[accounts.wallet_6] +mnemonic = "area desk dutch sign gold cricket dawn toward giggle vibrant indoor bench warfare wagon number tiny universe sand talk dilemma pottery bone trap buddy" +balance = 100_000_000_000_000 +# secret_key: 7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01 +# stx_address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 +# btc_address: mzxXgV6e4BZSsz8zVHm3TmqbECt7mbuErt + +[accounts.wallet_7] +mnemonic = "prevent gallery kind limb income control noise together echo rival record wedding sense uncover school version force bleak nuclear include danger skirt enact arrow" +balance = 100_000_000_000_000 +# secret_key: b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401 +# stx_address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ +# btc_address: n37mwmru2oaVosgfuvzBwgV2ysCQRrLko7 + +[accounts.wallet_8] +mnemonic = "female adjust gallery certain visit token during great side clown fitness like hurt clip knife warm bench start reunion globe detail dream depend fortune" +balance = 100_000_000_000_000 +# secret_key: 6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01 +# stx_address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP +# btc_address: n2v875jbJ4RjBnTjgbfikDfnwsDV5iUByw + +[accounts.wallet_9] +mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +balance = 100_000_000_000_000 +# secret_key: de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801 +# stx_address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 +# btc_address: mjSrB3wS4xab3kYqFktwBzfTdPg367ZJ2d + diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts new file mode 100644 index 00000000000..7e7a4f0e95b --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -0,0 +1,2147 @@ +import fc from "fast-check"; +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { + hasLockedStackers, + hasPoolMembers, + isAllowedContractCaller, + isAmountAboveThreshold, + isAmountLockedPositive, + isAmountWithinBalance, + isAmountWithinDelegationLimit, + isATCAboveThreshold, + isATCPositive, + isCallerAllowedByStacker, + isIncreaseByGTZero, + isIncreaseByWithinUnlockedBalance, + 
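+ // These helpers appear to be pure predicates over the local stacking model
+ // (defined in ./pox_CommandModel); they seem to mirror the checks pox-4
+ // performs on-chain, which is what lets each *_Err command below predict
+ // the exact error code a call should return.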
isPeriodWithinMax, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, + isUnlockedWithinCurrentRC, + isStackerInOperatorPool, + isStackerLockedByOperator, + PoxCommand, + Stacker, + StxAddress, + Wallet, + isPositive, +} from "./pox_CommandModel"; +import { + currentCycle, + currentCycleFirstBlock, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + nextCycleFirstBlock, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; +import { DelegateStackExtendCommand_Err } from "./pox_DelegateStackExtendCommand_Err"; +import { DelegateStackIncreaseCommand_Err } from "./pox_DelegateStackIncreaseCommand_Err"; +import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; +import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; +import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; +import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; +import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; +import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; +import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; +import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; +import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; +import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; +import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; +import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; +import { DisallowContractCallerCommand_Err } from "./pox_DisallowContractCallerCommand_Err"; + +const POX_4_ERRORS = { + ERR_STACKING_INSUFFICIENT_FUNDS: 1, + ERR_STACKING_INVALID_LOCK_PERIOD: 2, + ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_PERMISSION_DENIED: 9, + ERR_STACKING_THRESHOLD_NOT_MET: 11, + ERR_STACKING_INVALID_AMOUNT: 18, + ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_STACK_EXTEND_NOT_LOCKED: 26, + ERR_STACKING_IS_DELEGATED: 30, + ERR_STACKING_NOT_DELEGATED: 31, + ERR_DELEGATION_ALREADY_REVOKED: 34, +}; + +export function ErrCommands( + wallets: Map, + stackers: Map, + network: Simnet, +): fc.Arbitrary[] { + const cmds = [ + // StackStxAuthCommand_Err_Stacking_Already_Stacked_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: 
fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + !isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Stacked_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !(isStackingMinimumCalculated(model)) || + !isStacking(stacker) || + isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + !isDelegating(stacker) + ) return false; + + 
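+ // trackCommandRun appears to be bookkeeping only: it bumps a per-command
+ // counter on the model so the end-of-run report shows how often this
+ // error path was actually generated and checked.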
model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ) + ), + // RevokeDelegateStxCommand_Err_Delegation_Already_Revoked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new RevokeDelegateStxCommand_Err( + r.wallet, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "RevokeDelegateStxCommand_Err_Delegation_Already_Revoked", + ); + return true; + }, + POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, + ) + ), + // DelegateStxCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + delegateTo: fc.constantFrom(...wallets.values()), + untilBurnHt: fc.integer({ min: 1 }), + amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), + }) + .map(( + r: { + wallet: Wallet; + delegateTo: Wallet; + untilBurnHt: number; + amount: bigint; + }, + ) => + new DelegateStxCommand_Err( + r.wallet, + r.delegateTo, + r.untilBurnHt, + r.amount, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isDelegating(stacker) + ) return false; + + model.trackCommandRun( + "DelegateStxCommand_Err_Stacking_Already_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ) + ), + // StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // 
StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: 
fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function (this, model) { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.wallet.stxAddress)!; + const committedRewCycleIndexesOrFallback = + operator.committedRewCycleIndexes.length > 0 + ? 
operator.committedRewCycleIndexes
+ : [-1];
+ return fc
+ .record({
+ rewardCycleIndex: fc.constantFrom(
+ ...committedRewCycleIndexesOrFallback,
+ ),
+ })
+ .map((cycleIndex) => ({ ...r, ...cycleIndex }));
+ }).map(
+ (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) =>
+ new StackAggregationIncreaseCommand_Err(
+ r.wallet,
+ r.rewardCycleIndex,
+ r.authId,
+ function (this, model) {
+ const operator = model.stackers.get(this.operator.stxAddress)!;
+ if (
+ !hasLockedStackers(operator) ||
+ !isPositive(this.rewardCycleIndex) ||
+ isATCPositive(operator)
+ ) return false;
+
+ model.trackCommandRun(
+ "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal",
+ );
+ return true;
+ },
+ POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL,
+ ),
+ ),
+ // DelegateStackStxCommand_Err_Delegation_Too_Much_Locked
+ fc.record({
+ operator: fc.constantFrom(...wallets.values()),
+ startBurnHt: fc.integer({
+ min: currentCycleFirstBlock(network),
+ max: nextCycleFirstBlock(network),
+ }),
+ period: fc.integer({ min: 1, max: 12 }),
+ }).chain((r) => {
+ const operator = stackers.get(r.operator.stxAddress)!;
+ // Determine available stackers based on the operator
+ const availableStackers = operator.poolMembers.length > 0
+ ? operator.poolMembers
+ : [r.operator.stxAddress];
+
+ return fc.record({
+ stacker: fc.constantFrom(...availableStackers),
+ }).map((stacker) => ({
+ ...r,
+ stacker: wallets.get(stacker.stacker)!,
+ })).chain((resultWithStacker) => {
+ return fc.record({
+ unlockBurnHt: fc.constant(
+ currentCycleFirstBlock(network) +
+ 1050 * (resultWithStacker.period + 1),
+ ),
+ }).map((additionalProps) => ({
+ ...resultWithStacker,
+ ...additionalProps,
+ }));
+ }).chain((resultWithUnlockHeight) => {
+ return fc.record({
+ amount: fc.bigInt({
+ min: 0n,
+ max: 100_000_000_000_000n,
+ }),
+ }).map((amountProps) => ({
+ ...resultWithUnlockHeight,
+ ...amountProps,
+ }));
+ });
+ }).map((finalResult) => {
+ return new DelegateStackStxCommand_Err(
+ finalResult.operator,
+ finalResult.stacker,
+ finalResult.period,
+ finalResult.amount,
+ finalResult.unlockBurnHt,
+ function (this, model) {
+ const operatorWallet = model.stackers.get(this.operator.stxAddress)!;
+ const stackerWallet = model.stackers.get(this.stacker.stxAddress)!;
+ if (
+ !isStackingMinimumCalculated(model) ||
+ isStacking(stackerWallet) ||
+ !isDelegating(stackerWallet) ||
+ isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) ||
+ !isAmountWithinBalance(stackerWallet, this.amountUstx) ||
+ !isAmountAboveThreshold(model, this.amountUstx) ||
+ !isStackerInOperatorPool(operatorWallet, this.stacker) ||
+ !isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt)
+ ) return false;
+
+ model.trackCommandRun(
+ "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked",
+ );
+ return true;
+ },
+ POX_4_ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED,
+ );
+ }),
+ // DelegateStackStxCommand_Err_Stacking_Permission_Denied_1
+ fc.record({
+ operator: fc.constantFrom(...wallets.values()),
+ startBurnHt: fc.integer({
+ min: currentCycleFirstBlock(network),
+ max: nextCycleFirstBlock(network),
+ }),
+ period: fc.integer({ min: 1, max: 12 }),
+ }).chain((r) => {
+ const operator = stackers.get(r.operator.stxAddress)!;
+ // Determine available stackers based on the operator
+ const availableStackers = operator.poolMembers.length > 0
+ ?
operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: BigInt( + stackers.get(resultWithUnlockHeight.stacker.stxAddress)! + .delegatedMaxAmount, + ), + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function (this, model) { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stackerWallet) || + !isDelegating(stackerWallet) || + !isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) || + !isAmountWithinBalance(stackerWallet, this.amountUstx) || + !isAmountAboveThreshold(model, this.amountUstx) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) + ) return false; + + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), + // DelegateStackStxCommand_Err_Stacking_Permission_Denied_2 + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? 
operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: 100_000_000_000_000n, + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function (this, model) { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stackerWallet) || + isDelegating(stackerWallet) || + isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) || + !isAmountWithinBalance(stackerWallet, this.amountUstx) || + !isAmountAboveThreshold(model, this.amountUstx) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) + ) return false; + + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), + // StackIncreaseSigCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Is_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function 
(this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Invalid_Amount", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Is_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + isIncreaseByGTZero(this.increaseBy) + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = 
Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !hasPoolMembers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + !isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + 
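+ // Unlike the other StackExtend arbitraries above, extendCount is left as an
+ // unconstrained fc.integer() here on purpose: values outside 1..12 can push
+ // the computed totalPeriod past the PoX maximum, failing isPeriodWithinMax,
+ // which is the state this command needs in order to expect
+ // ERR_STACKING_INVALID_LOCK_PERIOD.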
}).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendSigCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + 
"StackExtendAuthCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !hasPoolMembers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + !isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const 
totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendAuthCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function (this, model) { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.constant(100000000000000), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function (this, model) { + const operator = model.stackers.get( + this.operator.stxAddress, + )!; + const stacker = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stacker.amountLocked; + + if ( + !isAmountLockedPositive(stacker) || + !isDelegating(stacker) || + !isStacking(stacker) || + !isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + !isAmountWithinDelegationLimit(stacker, stackedAmount) || + !isStackerInOperatorPool(operator, this.stacker) || + !isStackerLockedByOperator(operator, this.stacker) || + isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Not_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress] + : availableStackers; + + return fc + .record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }) + .map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function (this, model) { + const operator = model.stackers.get( + this.operator.stxAddress, + )!; + const stacker = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stacker.amountLocked; + + if ( + !isAmountLockedPositive(stacker) || + isDelegating(stacker) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Not_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, + ), + ), + // DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function (this, model) { + const operator = model.stackers.get( + this.operator.stxAddress, + )!; + const stacker = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stacker.amountLocked; + if ( + isAmountLockedPositive(stacker) || + !isDelegating(stacker) || + isStacking(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + !isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + stacker: fc.constantFrom(...wallets.values()), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function (this, model) { + const operator = model.stackers.get( + this.operator.stxAddress, + )!; + const stacker = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stacker.amountLocked; + + if ( + !isAmountLockedPositive(stacker) || + isDelegating(stacker) || + !isStacking(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + !isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Permission_Denied", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds + fc.record({ + 
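+      // increaseBy is pinned to Number.MAX_SAFE_INTEGER so the requested + // increase always exceeds the stacker's unlocked balance, driving the + // ERR_STACKING_INSUFFICIENT_FUNDS path.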
operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(Number.MAX_SAFE_INTEGER), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function (this, model) { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + !isAmountLockedPositive(stackerWallet) || + !isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + !isStackerInOperatorPool(operatorWallet, this.stacker) || + isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, + ) || + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function (this, model) { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + !isAmountLockedPositive(stackerWallet) || + !isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + isIncreaseByGTZero(this.increaseBy) || + !isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, + ) || + !isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function (this, model) { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + !isAmountLockedPositive(stackerWallet) || + isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isStackingSolo(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, + ) || + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 
0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function (this, model) { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + !isAmountLockedPositive(stackerWallet) || + isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, + ) || + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied", + ); + return true; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ), + ), + // DisallowContractCallerCommand_Err + fc.record({ + stacker: fc.constantFrom(...wallets.values()), + callerToDisallow: fc.constantFrom(...wallets.values()), + }).map( + (r: { stacker: Wallet; callerToDisallow: Wallet }) => + new DisallowContractCallerCommand_Err( + r.stacker, + r.callerToDisallow, + function (this, model) { + const stacker = model.stackers.get(this.stacker.stxAddress)!; + const callerToDisallow = model.stackers.get( + this.callerToDisallow.stxAddress, + )!; + if ( + isAllowedContractCaller(stacker, this.callerToDisallow) || + isCallerAllowedByStacker(this.stacker, callerToDisallow) + ) return false; + + model.trackCommandRun( + "DisallowContractCallerCommand_Err", + ); + return true; + }, + ), + ), + ]; + + return cmds; +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts new file mode 100644 index 00000000000..31a9239a44a --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -0,0 +1,173 @@ +import { it } from "vitest"; +import { initSimnet } from "@hirosystems/clarinet-sdk"; +import { Real, Stub } from "./pox_CommandModel.ts"; + +import { + getPublicKeyFromPrivate, + publicKeyToBtcAddress, +} from "@stacks/encryption"; +import { StacksDevnet } from "@stacks/network"; +import { + createStacksPrivateKey, + getAddressFromPrivateKey, + TransactionVersion, +} from "@stacks/transactions"; +import { StackingClient } from "@stacks/stacking"; + +import fc from "fast-check"; +import { PoxCommands } from "./pox_Commands.ts"; +import { ErrCommands } from "./err_Commands.ts"; + +import fs from "fs"; +import path from "path"; + +it("statefully interacts with PoX-4", async () => { + // SUT stands for "System Under Test". 
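+  // The Stub model (defined in pox_CommandModel.ts) tracks the state each + // command expects; fc.modelRun below executes every generated command + // against both the model and the real simnet and cross-checks the results.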
+  const sut: Real = { + network: await initSimnet(), + }; + + const wallets = [ + [ + "wallet_1", + "7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801", + ], + [ + "wallet_2", + "530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101", + ], + [ + "wallet_3", + "d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901", + ], + [ + "wallet_4", + "f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701", + ], + [ + "wallet_5", + "3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801", + ], + [ + "wallet_6", + "7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01", + ], + [ + "wallet_7", + "b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401", + ], + [ + "wallet_8", + "6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01", + ], + [ + "wallet_9", + "de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801", + ], + ].map((wallet) => { + const label = wallet[0]; + const prvKey = wallet[1]; + const pubKey = getPublicKeyFromPrivate(prvKey); + const devnet = new StacksDevnet(); + const initialUstxBalance = 100_000_000_000_000; + const signerPrvKey = createStacksPrivateKey(prvKey); + const signerPubKey = getPublicKeyFromPrivate(signerPrvKey.data); + const btcAddress = publicKeyToBtcAddress(pubKey); + const stxAddress = getAddressFromPrivateKey( + prvKey, + TransactionVersion.Testnet, + ); + + return { + label, + stxAddress, + btcAddress, + signerPrvKey, + signerPubKey, + stackingClient: new StackingClient(stxAddress, devnet), + ustxBalance: initialUstxBalance, + isStacking: false, + hasDelegated: false, + lockedAddresses: [], + amountToCommit: 0, + poolMembers: [], + delegatedTo: "", + delegatedMaxAmount: 0, + delegatedUntilBurnHt: 0, + delegatedPoxAddress: "", + amountLocked: 0, + amountUnlocked: initialUstxBalance, + unlockHeight: 0, + firstLockedRewardCycle: 0, + allowedContractCaller: "", + callerAllowedBy: [], + committedRewCycleIndexes: [], + }; + }); + + // Track the number of times each command is run, so we can see if all the + // commands are run at least once. + const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => + file.startsWith("pox_") && file.endsWith(".ts") && + file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && + !file.includes("_Err") + ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. + + // This is the initial state of the model. + const model = new Stub( + new Map(wallets.map((wallet) => [wallet.stxAddress, wallet])), + new Map(wallets.map((wallet) => [wallet.stxAddress, { + ustxBalance: 100_000_000_000_000, + isStacking: false, + isStackingSolo: false, + hasDelegated: false, + lockedAddresses: [], + amountToCommit: 0, + poolMembers: [], + delegatedTo: "", + delegatedMaxAmount: 0, + // We initialize delegatedUntilBurnHt to 0. It will be updated + // after successful delegate-stx calls. Its value will be either + // the unwrapped until-burn-ht uint passed to delegate-stx, + // or undefined for indefinite delegations.
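+      // (isUBHWithinDelegationLimit in pox_CommandModel.ts depends on this: + // an undefined delegatedUntilBurnHt means the delegation never expires.)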
+ delegatedUntilBurnHt: 0, + delegatedPoxAddress: "", + amountLocked: 0, + amountUnlocked: 100_000_000_000_000, + unlockHeight: 0, + firstLockedRewardCycle: 0, + allowedContractCallers: [], + callerAllowedBy: [], + committedRewCycleIndexes: [], + }])), + new Map(statistics.map((commandName) => [commandName, 0])), + ); + + simnet.setEpoch("3.0"); + + const successPath = PoxCommands(model.wallets, model.stackers, sut.network); + const failurePath = ErrCommands(model.wallets, model.stackers, sut.network); + + fc.assert( + fc.property( + // More on size: https://github.com/dubzzz/fast-check/discussions/2978 + // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 + fc.commands(successPath.concat(failurePath), { size: "xsmall" }), + (cmds) => { + const initialState = () => ({ model: model, real: sut }); + fc.modelRun(initialState, cmds); + }, + ), + { + // Defines the number of test iterations to run; default is 100. + numRuns: 20000, + // Adjusts the level of detail in test reports. Default is 0 (minimal). + // At level 2, reports include extensive details, helpful for deep + // debugging. This includes not just the failing case and its seed, but + // also a comprehensive log of all executed steps and their outcomes. + verbose: 2, + }, + ); + + model.reportCommandRuns(); +}); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts new file mode 100644 index 00000000000..931326ce1fc --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts @@ -0,0 +1,116 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { + boolCV, + Cl, + ClarityType, + OptionalCV, + UIntCV, +} from "@stacks/transactions"; + +/** + * The `AllowContractCallerCommand` authorizes a `contract-caller` to call + * stacking methods. Normally, stacking methods can only be invoked by direct + * transactions (i.e., the tx-sender issues a direct contract-call to the + * stacking methods). By issuing an allowance, the tx-sender may call stacking + * methods through the allowed contract. + * + * There are no constraints for running this command. + */ +export class AllowContractCallerCommand implements PoxCommand { + readonly wallet: Wallet; + readonly allowanceTo: Wallet; + readonly allowUntilBurnHt: OptionalCV; + + /** + * Constructs an `AllowContractCallerCommand` that authorizes a + * `contract-caller` to call stacking methods. + * + * @param wallet - Represents the Stacker's wallet. + * @param allowanceTo - Represents the authorized `contract-caller` (i.e., a + * stacking pool). + * @param allowUntilBurnHt - The burn block height until which the + * authorization is valid. + */ + constructor( + wallet: Wallet, + allowanceTo: Wallet, + allowUntilBurnHt: OptionalCV, + ) { + this.wallet = wallet; + this.allowanceTo = allowanceTo; + this.allowUntilBurnHt = allowUntilBurnHt; + } + + check(): boolean { + // There are no constraints for running this command. 
+    return true; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // Act + const allowContractCaller = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "allow-contract-caller", + [ + // (caller principal) + Cl.principal(this.allowanceTo.stxAddress), + // (until-burn-ht (optional uint)) + this.allowUntilBurnHt, + ], + this.wallet.stxAddress, + ); + + // Assert + expect(allowContractCaller.result).toBeOk(boolCV(true)); + + // Get the wallets involved from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + + const callerToAllow = model.stackers.get(this.allowanceTo.stxAddress)!; + // Update model so that we know this wallet has authorized a contract-caller. + const callerToAllowIndexInAllowedList = wallet.allowedContractCallers + .indexOf(this.allowanceTo.stxAddress); + + if (callerToAllowIndexInAllowedList === -1) { + wallet.allowedContractCallers.push(this.allowanceTo.stxAddress); + callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + } + + // Log to console for debugging purposes. This is not necessary for the + // test to pass, but it is useful for eyeballing the test run. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "allow-contract-caller", + this.allowanceTo.label, + "until", + optionalCVToString(this.allowUntilBurnHt), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} allow-contract-caller ${this.allowanceTo.stxAddress} until burn ht ${ + optionalCVToString(this.allowUntilBurnHt) + }`; + } +} + +const optionalCVToString = (optional: OptionalCV): string => + optional.type === ClarityType.OptionalSome + ? 
(optional.value as UIntCV).value.toString() + : "none"; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts new file mode 100644 index 00000000000..5b6cb95c272 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -0,0 +1,512 @@ +import fc from "fast-check"; + +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { + ClarityValue, + cvToValue, + StacksPrivateKey, +} from "@stacks/transactions"; +import { StackingClient } from "@stacks/stacking"; +import { + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; + +export type StxAddress = string; +export type BtcAddress = string; +export type CommandTag = string; + +export class Stub { + readonly wallets: Map<StxAddress, Wallet>; + readonly statistics: Map<CommandTag, number>; + readonly stackers: Map<StxAddress, Stacker>; + stackingMinimum: number; + nextRewardSetIndex: number; + lastRefreshedCycle: number; + burnBlockHeight: number; + + constructor( + wallets: Map<StxAddress, Wallet>, + stackers: Map<StxAddress, Stacker>, + statistics: Map<CommandTag, number>, + ) { + this.wallets = wallets; + this.statistics = statistics; + this.stackers = stackers; + this.stackingMinimum = 0; + this.nextRewardSetIndex = 0; + this.lastRefreshedCycle = 0; + this.burnBlockHeight = 0; + } + + trackCommandRun(commandName: string) { + const count = this.statistics.get(commandName) || 0; + this.statistics.set(commandName, count + 1); + } + + reportCommandRuns() { + console.log("Command run method execution counts:"); + const orderedStatistics = Array.from(this.statistics.entries()).sort( + ([keyA], [keyB]) => { + return keyA.localeCompare(keyB); + }, + ); + + this.logAsTree(orderedStatistics); + } + + private logAsTree(statistics: [string, number][]) { + const tree: { [key: string]: any } = {}; + + statistics.forEach(([commandName, count]) => { + const split = commandName.split("_"); + let root: string = split[0], + rest: string = "base"; + + if (split.length > 1) { + rest = split.slice(1).join("_"); + } + if (!tree[root]) { + tree[root] = {}; + } + tree[root][rest] = count; + }); + + const printTree = (node: any, indent: string = "") => { + const keys = Object.keys(node); + keys.forEach((key, index) => { + const isLast = index === keys.length - 1; + const boxChar = isLast ? "└─ " : "├─ "; + if (key !== "base") { + if (typeof node[key] === "object") { + console.log(`${indent}${boxChar}${key}: ${node[key]["base"]}`); + printTree(node[key], indent + (isLast ? " " : "│ ")); + } else { + console.log(`${indent}${boxChar}${key}: ${node[key]}`); + } + } + }); + }; + + printTree(tree); + } + + refreshStateForNextRewardCycle(real: Real) { + const burnBlockHeightResult = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightResult as ClarityValue), + ); + const lastRefreshedCycle = this.lastRefreshedCycle; + const currentRewCycle = Math.floor( + (Number(burnBlockHeight) - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + + // The `this.burnBlockHeight` instance member is used for logging purposes. + // However, it's not used in the actual implementation of the model and all + // usages below use the `burnBlockHeight` local variable.
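+    // For reference, currentRewCycle above works out as follows: with + // FIRST_BURNCHAIN_BLOCK_HEIGHT = 0 and REWARD_CYCLE_LENGTH = 1050 (see + // pox_Commands.ts), a burn block height of 2100 falls in reward cycle + // floor((2100 - 0) / 1050) = 2.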
+    this.burnBlockHeight = burnBlockHeight; + + if (lastRefreshedCycle < currentRewCycle) { + this.nextRewardSetIndex = 0; + + this.wallets.forEach((w) => { + let updatedAmountToCommit = 0; + const wallet = this.stackers.get(w.stxAddress)!; + + // Get the wallet's ex-delegators by comparing their delegatedUntilBurnHt + // to the current burn block height (only if the wallet is a delegatee). + // If the delegatedUntilBurnHt is undefined, the delegator is considered + // active for an indefinite period (until a revoke-delegate-stx call). + const expiredDelegators = wallet.poolMembers.filter( + (stackerAddress) => + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt !== + undefined && + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt as number < + burnBlockHeight, + ); + + // Get the operator's pool stackers that no longer have partially committed + // STX for the next reward cycle by comparing their unlock height to + // the next reward cycle's first block (only if the wallet is an operator). + const stackersToRemoveAmountToCommit = wallet.lockedAddresses.filter(( + stackerAddress, + ) => + this.stackers.get(stackerAddress)!.unlockHeight <= + burnBlockHeight + REWARD_CYCLE_LENGTH + ); + + // Get the operator's ex-pool stackers by comparing their unlockHeight to + // the current burn block height (only if the wallet is an operator). + const expiredStackers = wallet.lockedAddresses.filter( + (stackerAddress) => + this.stackers.get(stackerAddress)!.unlockHeight <= + burnBlockHeight, + ); + + // For each remaining pool stacker (if any), increase the operator's + // amountToCommit (partial-stacked) for the next cycle by the + // stacker's amountLocked. + wallet.lockedAddresses.forEach((stacker) => { + const stackerWallet = this.stackers.get(stacker)!; + updatedAmountToCommit += stackerWallet.amountLocked; + }); + + // Update the operator's amountToCommit (partial-stacked). + wallet.amountToCommit = updatedAmountToCommit; + + // Remove the expired delegators from the delegatee's poolMembers list. + expiredDelegators.forEach((expDelegator) => { + const expDelegatorIndex = wallet.poolMembers.indexOf(expDelegator); + wallet.poolMembers.splice(expDelegatorIndex, 1); + }); + + // Remove the expired stackers from the operator's lockedAddresses list. + expiredStackers.forEach((expStacker) => { + const expStackerIndex = wallet.lockedAddresses.indexOf(expStacker); + wallet.lockedAddresses.splice(expStackerIndex, 1); + }); + + // For each pool stacker that no longer has partially committed STX for + // the next reward cycle, decrement the operator's amountToCommit + // (partial-stacked) by the stacker's amountLocked. + stackersToRemoveAmountToCommit.forEach((expStacker) => { + const expStackerWallet = this.stackers.get(expStacker)!; + wallet.amountToCommit -= expStackerWallet.amountLocked; + }); + + // Check the wallet's stack expiry and update the state accordingly.
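+        // A stack counts as expired once its unlockHeight is at or below the + // current burn height; the locked amount then flows back into + // amountUnlocked.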
+        if ( + wallet.unlockHeight > 0 && wallet.unlockHeight <= burnBlockHeight + ) { + wallet.isStacking = false; + wallet.isStackingSolo = false; + wallet.amountUnlocked += wallet.amountLocked; + wallet.amountLocked = 0; + wallet.unlockHeight = 0; + wallet.firstLockedRewardCycle = 0; + } // If the wallet is solo stacking and its stack won't expire in the + // next reward cycle, increment the model's nextRewardSetIndex (the + // next empty reward slot). + else if ( + wallet.unlockHeight > 0 && + wallet.unlockHeight > burnBlockHeight + REWARD_CYCLE_LENGTH && + wallet.isStackingSolo + ) { + this.nextRewardSetIndex++; + } + wallet.committedRewCycleIndexes = []; + }); + this.lastRefreshedCycle = currentRewCycle; + } + } +} + +export type Real = { + network: Simnet; +}; + +export type Wallet = { + label: string; + stxAddress: string; + btcAddress: string; + signerPrvKey: StacksPrivateKey; + signerPubKey: string; + stackingClient: StackingClient; +}; + +export type Stacker = { + ustxBalance: number; + isStacking: boolean; + isStackingSolo: boolean; + hasDelegated: boolean; + lockedAddresses: StxAddress[]; + amountToCommit: number; + poolMembers: StxAddress[]; + delegatedTo: StxAddress; + delegatedMaxAmount: number; + delegatedUntilBurnHt: number | undefined; + delegatedPoxAddress: BtcAddress; + amountLocked: number; + amountUnlocked: number; + unlockHeight: number; + firstLockedRewardCycle: number; + allowedContractCallers: StxAddress[]; + callerAllowedBy: StxAddress[]; + committedRewCycleIndexes: number[]; +}; + +export type PoxCommand = fc.Command<Stub, Real>; + +export const logCommand = (...items: (string | undefined)[]) => { + // Ensure we only render up to the first 10 items for brevity. + const renderItems = items.slice(0, 10); + const columnWidth = 23; // Standard width for each column after the first two. + const halfColumns = Math.floor(columnWidth / 2); + + // Pad columns to their widths: half for the first two, full for the rest. + const prettyPrint = renderItems.map((content, index) => + // Check if the index is less than 2 (i.e., first two items). + content + ? (index < 2 ? content.padEnd(halfColumns) : content.padEnd(columnWidth)) + : (index < 2 ? "".padEnd(halfColumns) : "".padEnd(columnWidth)) + ); + prettyPrint.push("\n"); + + process.stdout.write(prettyPrint.join("")); +}; + +/** + * Helper function that checks if the minimum uSTX threshold was set in the model. + * @param model - the model at a given moment in time. + * @returns boolean. + */ +export const isStackingMinimumCalculated = (model: Readonly<Stub>): boolean => + model.stackingMinimum > 0; + +/** + * Helper function that checks if a stacker is currently stacking. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isStacking = (stacker: Stacker): boolean => + stacker.isStacking; + +/** + * Helper function that checks if a stacker has an active delegation. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isDelegating = (stacker: Stacker): boolean => + stacker.hasDelegated; + +/** + * Helper function that checks if the stacker is stacking using solo + * stacking methods. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isStackingSolo = (stacker: Stacker): boolean => + stacker.isStackingSolo; + +/** + * Helper function that checks if the stacker has locked uSTX. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean.
+ */ +export const isAmountLockedPositive = (stacker: Stacker): boolean => + stacker.amountLocked > 0; + +/** + * Helper function that checks if an operator has locked uSTX on + * behalf of at least one stacker. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const hasLockedStackers = (operator: Stacker): boolean => + operator.lockedAddresses.length > 0; + +/** + * Helper function that checks if an operator has uSTX that was not + * yet committed. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + * + * NOTE: ATC is an abbreviation for "amount to commit". + */ +export const isATCPositive = (operator: Stacker): boolean => + operator.amountToCommit > 0; + +/** + * Helper function that checks if an operator's uncommitted uSTX + * amount is above the minimum stacking threshold. + * @param operator - the operator's state at a given moment in time. + * @param model - the model at a given moment in time. + * @returns boolean. + * + * NOTE: ATC is an abbreviation for "amount to commit". + */ +export const isATCAboveThreshold = ( + operator: Stacker, + model: Readonly<Stub>, +): boolean => operator.amountToCommit >= model.stackingMinimum; + +/** + * Helper function that checks if a uSTX amount fits within a stacker's + * delegation limit. + * @param stacker - the stacker's state at a given moment in time. + * @param amountToCheck - the uSTX amount to check. + * @returns boolean. + */ +export const isAmountWithinDelegationLimit = ( + stacker: Stacker, + amountToCheck: bigint | number, +): boolean => stacker.delegatedMaxAmount >= Number(amountToCheck); + +/** + * Helper function that checks if a given unlock burn height is within + * a stacker's delegation limit. + * @param stacker - the stacker's state at a given moment in time. + * @param unlockBurnHt - the verified unlock burn height. + * @returns boolean. + * + * NOTE: UBH is an abbreviation for "unlock burn height". + */ +export const isUBHWithinDelegationLimit = ( + stacker: Stacker, + unlockBurnHt: number, +): boolean => + stacker.delegatedUntilBurnHt === undefined || + unlockBurnHt <= stacker.delegatedUntilBurnHt; + +/** + * Helper function that checks if a given amount is within a stacker's + * unlocked uSTX balance. + * @param stacker - the stacker's state at a given moment in time. + * @param amountToCheck - the amount to check. + * @returns boolean. + */ +export const isAmountWithinBalance = ( + stacker: Stacker, + amountToCheck: bigint | number, +): boolean => stacker.ustxBalance >= Number(amountToCheck); + +/** + * Helper function that checks if a given amount is above the minimum + * stacking threshold. + * @param model - the model at a given moment in time. + * @param amountToCheck - the amount to check. + * @returns boolean. + */ +export const isAmountAboveThreshold = ( + model: Readonly<Stub>, + amountToCheck: bigint | number, +): boolean => Number(amountToCheck) >= model.stackingMinimum; + +/** + * Helper function that checks if an operator has at least one pool + * participant. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const hasPoolMembers = (operator: Stacker): boolean => + operator.poolMembers.length > 0; + +/** + * Helper function that checks if a stacker is a pool member of a + * given operator. + * @param operator - the operator's state at a given moment in time. + * @param stacker - the stacker's state at a given moment in time.
+ * @returns boolean. + */ +export const isStackerInOperatorPool = ( + operator: Stacker, + stacker: Wallet, +): boolean => operator.poolMembers.includes(stacker.stxAddress); + +/** + * Helper function that checks if a given stacker's funds are locked + * by a given operator. + * @param stacker - the stacker's state at a given moment in time. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const isStackerLockedByOperator = ( + operator: Stacker, + stacker: Wallet, +): boolean => + operator.lockedAddresses.includes( + stacker.stxAddress, + ); + +/** + * Helper function that checks if a given stacker's unlock height is + * within the current reward cycle. + * @param stacker - the stacker's state at a given moment in time. + * @param model - the model at a given moment in time. + * @returns boolean. + * + * NOTE: RC is an abbreviation for "reward cycle". + */ +export const isUnlockedWithinCurrentRC = ( + stackerWallet: Stacker, + model: Readonly<Stub>, +): boolean => (stackerWallet.unlockHeight <= + model.burnBlockHeight + REWARD_CYCLE_LENGTH); + +/** + * Helper function that checks if the increase amount is within a given + * stacker's unlocked balance. + * @param stacker - the stacker's state at a given moment in time. + * @param increaseBy - the increase amount to check. + * @returns boolean. + */ +export const isIncreaseByWithinUnlockedBalance = ( + stacker: Stacker, + increaseBy: number, +): boolean => increaseBy <= stacker.amountUnlocked; + +/** + * Helper function that checks if the increase amount is greater than zero. + * @param increaseBy - the increase amount to check. + * @returns boolean. + */ +export const isIncreaseByGTZero = (increaseBy: number): boolean => + increaseBy >= 1; + +/** + * Helper function that checks if the given lock period does not exceed the + * PoX-4 maximum of 12 reward cycles. + * @param period - the period to check. + * @returns boolean. + */ +export const isPeriodWithinMax = (period: number): boolean => period <= 12; + +/** + * Helper function that checks if a given stacker is currently delegating + * to a given operator. + * @param stacker - the stacker's state at a given moment in time. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const isStackerDelegatingToOperator = ( + stacker: Stacker, + operator: Wallet, +): boolean => stacker.delegatedTo === operator.stxAddress; + +/** + * Helper function that checks if a given increase amount is greater than + * zero. + * @param increaseAmount - the increase amount to check. + * @returns boolean. + */ +export const isIncreaseAmountGTZero = (increaseAmount: number): boolean => + increaseAmount > 0; + +/** + * Helper function that checks if a given stacker has issued an allowance + * to a potential contract caller. + * @param stacker - the stacker's state at a given moment in time. + * @param potentialAllowedStacker - the potential contract caller's state. + * @returns boolean. + */ +export const isAllowedContractCaller = ( + stacker: Stacker, + potentialAllowedStacker: Wallet, +): boolean => + stacker.allowedContractCallers.includes( + potentialAllowedStacker.stxAddress, + ); + +/** + * Helper function that checks if a given contract caller has been allowed by + * a given stacker. + * @param stacker - the stacker's state at a given moment in time. + * @param caller - the contract caller's state. + * @returns boolean.
+ */ +export const isCallerAllowedByStacker = ( + stacker: Wallet, + caller: Stacker, +): boolean => caller.callerAllowedBy.includes(stacker.stxAddress); + +export const isPositive = (value: number): boolean => value >= 0; \ No newline at end of file diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts new file mode 100644 index 00000000000..a42cb6278ee --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -0,0 +1,490 @@ +import fc from "fast-check"; +import { PoxCommand, Stacker, StxAddress, Wallet } from "./pox_CommandModel"; +import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; +import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; +import { StackStxSigCommand } from "./pox_StackStxSigCommand"; +import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; +import { DelegateStxCommand } from "./pox_DelegateStxCommand"; +import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { Cl, cvToValue, OptionalCV, UIntCV } from "@stacks/transactions"; +import { RevokeDelegateStxCommand } from "./pox_RevokeDelegateStxCommand"; +import { AllowContractCallerCommand } from "./pox_AllowContractCallerCommand"; +import { DelegateStackIncreaseCommand } from "./pox_DelegateStackIncreaseCommand"; +import { DelegateStackExtendCommand } from "./pox_DelegateStackExtendCommand"; +import { StackAggregationCommitAuthCommand } from "./pox_StackAggregationCommitAuthCommand"; +import { StackAggregationCommitSigCommand } from "./pox_StackAggregationCommitSigCommand"; +import { StackAggregationCommitIndexedSigCommand } from "./pox_StackAggregationCommitIndexedSigCommand"; +import { StackAggregationCommitIndexedAuthCommand } from "./pox_StackAggregationCommitIndexedAuthCommand"; +import { StackAggregationIncreaseCommand } from "./pox_StackAggregationIncreaseCommand"; +import { DisallowContractCallerCommand } from "./pox_DisallowContractCallerCommand"; +import { StackExtendAuthCommand } from "./pox_StackExtendAuthCommand"; +import { StackExtendSigCommand } from "./pox_StackExtendSigCommand"; +import { StackIncreaseAuthCommand } from "./pox_StackIncreaseAuthCommand"; +import { StackIncreaseSigCommand } from "./pox_StackIncreaseSigCommand"; + +export function PoxCommands( + wallets: Map<StxAddress, Wallet>, + stackers: Map<StxAddress, Stacker>, + network: Simnet, +): fc.Arbitrary<PoxCommand>[] { + const cmds = [ + // GetStackingMinimumCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new GetStackingMinimumCommand( + r.wallet, + ) + ), + // StackStxSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand( + r.wallet, + r.authId, + r.period, + r.margin, + ) + ), + // StackStxAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand( + r.wallet, + r.authId, + r.period, + r.margin, + ) + ), + // StackExtendAuthCommand + fc + .record({ + wallet: 
fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }) + .map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + ), + ), + // StackExtendSigCommand + fc + .record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }) + .map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + ), + ), + // StackIncreaseAuthCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }) + .map((r) => { + return new StackIncreaseAuthCommand( + r.operator, + r.increaseBy, + r.authId, + ); + }), + // StackIncreaseSigCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }) + .map((r) => { + return new StackIncreaseSigCommand( + r.operator, + r.increaseBy, + r.authId, + ); + }), + // GetStackingMinimumCommand + fc + .record({ + wallet: fc.constantFrom(...wallets.values()), + }) + .map((r: { wallet: Wallet }) => new GetStackingMinimumCommand(r.wallet)), + // DelegateStxCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + delegateTo: fc.constantFrom(...wallets.values()), + untilBurnHt: fc.oneof( + fc.constant(Cl.none()), + fc.integer({ min: 1 }).map((value) => Cl.some(Cl.uint(value))), + ), + amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), + }).map(( + r: { + wallet: Wallet; + delegateTo: Wallet; + untilBurnHt: OptionalCV; + amount: bigint; + }, + ) => + new DelegateStxCommand( + r.wallet, + r.delegateTo, + r.untilBurnHt, + r.amount, + ) + ), + // StackAggregationCommitAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitAuthCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitSigCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitIndexedAuthCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitIndexedAuthCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationCommitIndexedSigCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map(( + r: { + wallet: Wallet; + authId: number; + }, + ) => + new StackAggregationCommitIndexedSigCommand( + r.wallet, + r.authId, + ) + ), + // StackAggregationIncreaseCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.wallet.stxAddress)!; + const committedRewCycleIndexesOrFallback = + operator.committedRewCycleIndexes.length > 0 + ? 
operator.committedRewCycleIndexes + : [-1]; + return fc.record({ + rewardCycleIndex: fc.constantFrom( + ...committedRewCycleIndexesOrFallback, + ), + }).map((cycleIndex) => ({ ...r, ...cycleIndex })); + }).map(( + r: { + wallet: Wallet; + rewardCycleIndex: number; + authId: number; + }, + ) => + new StackAggregationIncreaseCommand( + r.wallet, + r.rewardCycleIndex, + r.authId, + ) + ), + // RevokeDelegateStxCommand + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new RevokeDelegateStxCommand( + r.wallet, + ) + ), + // DelegateStackStxCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: BigInt( + stackers.get(resultWithUnlockHeight.stacker.stxAddress)! + .delegatedMaxAmount, + ), + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + ); + }), + // DelegateStackIncreaseCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }) + .chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc + .record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }) + .map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }) + .map((final) => { + return new DelegateStackIncreaseCommand( + final.operator, + final.stacker, + final.increaseBy, + ); + }), + // DelegateStackExtendCommand + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? 
[r.operator.stxAddress]
+          : availableStackers;
+
+        return fc.record({
+          stacker: fc.constantFrom(...availableStackersOrFallback),
+          currentCycle: fc.constant(currentCycle(network)),
+        })
+          .map((additionalProps) => ({
+            ...r,
+            stacker: wallets.get(additionalProps.stacker)!,
+            currentCycle: additionalProps.currentCycle,
+          }));
+      }).map((final) =>
+        new DelegateStackExtendCommand(
+          final.operator,
+          final.stacker,
+          final.extendCount,
+          final.currentCycle,
+        )
+      ),
+    // AllowContractCallerCommand
+    fc.record({
+      wallet: fc.constantFrom(...wallets.values()),
+      allowanceTo: fc.constantFrom(...wallets.values()),
+      allowUntilBurnHt: fc.oneof(
+        fc.constant(Cl.none()),
+        fc.integer({ min: 1 }).map((value) => Cl.some(Cl.uint(value))),
+      ),
+    })
+      .map(
+        (r: {
+          wallet: Wallet;
+          allowanceTo: Wallet;
+          allowUntilBurnHt: OptionalCV<UIntCV>;
+        }) =>
+          new AllowContractCallerCommand(
+            r.wallet,
+            r.allowanceTo,
+            r.allowUntilBurnHt,
+          ),
+      ),
+    // DisallowContractCallerCommand
+    fc.record({
+      stacker: fc.constantFrom(...wallets.values()),
+      callerToDisallow: fc.constantFrom(...wallets.values()),
+    }).map(
+      (r: {
+        stacker: Wallet;
+        callerToDisallow: Wallet;
+      }) =>
+        new DisallowContractCallerCommand(
+          r.stacker,
+          r.callerToDisallow,
+        ),
+    ),
+    // GetStxAccountCommand
+    fc.record({
+      wallet: fc.constantFrom(...wallets.values()),
+    }).map((
+      r: {
+        wallet: Wallet;
+      },
+    ) =>
+      new GetStxAccountCommand(
+        r.wallet,
+      )
+    ),
+  ];
+
+  return cmds;
+}
+
+export const REWARD_CYCLE_LENGTH = 1050;
+
+export const FIRST_BURNCHAIN_BLOCK_HEIGHT = 0;
+
+export const currentCycle = (network: Simnet) =>
+  Number(cvToValue(
+    network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "current-pox-reward-cycle",
+      [],
+      "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",
+    ).result,
+  ));
+
+export const currentCycleFirstBlock = (network: Simnet) =>
+  Number(cvToValue(
+    network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "reward-cycle-to-burn-height",
+      [Cl.uint(currentCycle(network))],
+      "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",
+    ).result,
+  ));
+
+export const nextCycleFirstBlock = (network: Simnet) =>
+  Number(cvToValue(
+    network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "reward-cycle-to-burn-height",
+      [Cl.uint(currentCycle(network) + 1)],
+      "ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM",
+    ).result,
+  ));
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts
new file mode 100644
index 00000000000..a3fe2a5f1a2
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts
@@ -0,0 +1,178 @@
+import {
+  isAmountLockedPositive,
+  isAmountWithinDelegationLimit,
+  isPeriodWithinMax,
+  isStackerDelegatingToOperator,
+  isDelegating,
+  isStacking,
+  isUBHWithinDelegationLimit,
+  isStackerInOperatorPool,
+  isStackerLockedByOperator,
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { poxAddressToTuple } from "@stacks/stacking";
+import { assert, expect } from "vitest";
+import { Cl, ClarityType, isClarityType } from "@stacks/transactions";
+import {
+  FIRST_BURNCHAIN_BLOCK_HEIGHT,
+  REWARD_CYCLE_LENGTH,
+} from "./pox_Commands.ts";
+
+/**
+ * The `DelegateStackExtendCommand` allows a pool operator to
+ * extend an active stacking lock, issuing a "partial commitment"
+ * for the extended-to cycles.
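+ *
+ * The extended cycles are only partially committed; the operator still
+ * needs a follow-up stack-aggregation-commit call before those cycles
+ * can earn rewards.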
+ * + * This method extends stacker's current lockup for an additional + * extend-count and partially commits those new cycles to `pox-addr`. + * + * Constraints for running this command include: + * - Stacker must have locked uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The new lock period must be less than or equal to 12. + */ +export class DelegateStackExtendCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly extendCount: number; + readonly currentCycle: number; + + /** + * Constructs a `DelegateStackExtendCommand` to extend the unlock + * height as a Pool Operator on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param extendCount - Represents the number of cycles to extend + * the stack for. + * @param currentCycle - Represents the current PoX reward cycle. + */ + constructor( + operator: Wallet, + stacker: Wallet, + extendCount: number, + currentCycle: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.extendCount = extendCount; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - Stacker must have locked uSTX. + // - The Stacker's uSTX must have been locked by the Operator. + // - The Operator has to currently be delegated by the Stacker. + // - The new lock period must be less than or equal to 12. + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + const firstRewardCycle = Math.max( + stackerWallet.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + return ( + isAmountLockedPositive(stackerWallet) && + isDelegating(stackerWallet) && + isStacking(stackerWallet) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isUBHWithinDelegationLimit(stackerWallet, newUnlockHeight) && + isAmountWithinDelegationLimit(stackerWallet, stackedAmount) && + isStackerInOperatorPool(operatorWallet, this.stacker) && + isStackerLockedByOperator(operatorWallet, this.stacker) && + isPeriodWithinMax(totalPeriod) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + // Act + const delegateStackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-extend", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(stackerWallet.delegatedPoxAddress), + // (extend-count uint) + Cl.uint(this.extendCount), + ], + this.operator.stxAddress, + ); + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stackerWallet.unlockHeight)], + this.operator.stxAddress, + ); + assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = 
Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.operator.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + const newUnlockHeight = extendedUnlockHeight.value; + + // Assert + expect(delegateStackExtend.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + // Update model so that we know this wallet's unlock height was extended. + stackerWallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ó¾ ${this.stacker.label}`, + "delegate-stack-extend", + "extend count", + this.extendCount.toString(), + "new unlock height", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-extend extend count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts new file mode 100644 index 00000000000..680532bef64 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts @@ -0,0 +1,96 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackExtendCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackExtendCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly extendCount: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackExtendCommand_Err` to extend the unlock + * height as a Pool Operator on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param extendCount - Represents the number of cycles to extend the stack for. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. 
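+   *
+   * @example
+   * // A hypothetical sketch (wallets and the error code are illustrative):
+   * // expect `delegate-stack-extend` to fail when the stacker has nothing
+   * // locked, e.g. with pox-4's ERR_STACK_EXTEND_NOT_LOCKED (err 26).
+   * new DelegateStackExtendCommand_Err(
+   *   operator,
+   *   stacker,
+   *   6,
+   *   currentCycle,
+   *   function (model) {
+   *     return !model.stackers.get(this.stacker.stxAddress)!.isStacking;
+   *   },
+   *   26,
+   * );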
+ */ + constructor( + operator: Wallet, + stacker: Wallet, + extendCount: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.extendCount = extendCount; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + // Act + const delegateStackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-extend", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (extend-count uint) + Cl.uint(this.extendCount), + ], + this.operator.stxAddress, + ); + + expect(delegateStackExtend.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ó¾ ${this.stacker.label}`, + "delegate-stack-extend", + "extend count", + this.extendCount.toString(), + "new unlock height", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} Ó¾ ${this.stacker.label} delegate-stack-extend extend count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts new file mode 100644 index 00000000000..43b6a0473af --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts @@ -0,0 +1,150 @@ +import { + isAmountLockedPositive, + isAmountWithinDelegationLimit, + isIncreaseAmountGTZero, + isIncreaseByWithinUnlockedBalance, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isUnlockedWithinCurrentRC, + isStackerLockedByOperator, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +/** + * The `DelegateStackIncreaseCommand` allows a pool operator to + * increase an active stacking lock, issuing a "partial commitment" + * for the increased cycles. + * + * This method increases stacker's current lockup and partially + * commits the additional STX to `pox-addr`. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The increase amount must be greater than 0. + * - Stacker's unlocked uSTX amount must be greater than or equal + * to the value of the increase amount. + * - Stacker's maximum delegated amount must be greater than or equal + * to the final locked amount. + * - The Operator must have locked the Stacker's previously locked funds. 
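+ *
+ * As a worked example, a stacker with 10,000,000 uSTX locked and 5,000,000
+ * uSTX unlocked can be increased by at most 5,000,000 uSTX, and only if the
+ * resulting 15,000,000 uSTX total stays within the stacker's
+ * `delegatedMaxAmount`.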
+ */ +export class DelegateStackIncreaseCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly increaseBy: number; + + /** + * Constructs a `DelegateStackIncreaseCommand` to increase the uSTX amount + * previously locked on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + */ + constructor(operator: Wallet, stacker: Wallet, increaseBy: number) { + this.operator = operator; + this.stacker = stacker; + this.increaseBy = increaseBy; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Operator has to currently be delegated by the Stacker. + // - The increase amount must be greater than 0. + // - Stacker's unlocked uSTX amount must be greater than or equal + // to the value of the increase amount. + // - Stacker's maximum delegated amount must be greater than or equal + // to the final locked amount. + // - The Operator must have locked the Stacker's previously locked funds. + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + return ( + isAmountLockedPositive(stackerWallet) && + isDelegating(stackerWallet) && + isStacking(stackerWallet) && + isIncreaseAmountGTZero(this.increaseBy) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isIncreaseByWithinUnlockedBalance(stackerWallet, this.increaseBy) && + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) && + isStackerLockedByOperator(operatorWallet, this.stacker) && + isUnlockedWithinCurrentRC(stackerWallet, model) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const prevLocked = stackerWallet.amountLocked; + const newTotalLocked = prevLocked + this.increaseBy; + // Act + const delegateStackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-increase", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(stackerWallet.delegatedPoxAddress), + // (increase-by uint) + Cl.uint(this.increaseBy), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackIncrease.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "total-locked": Cl.uint(newTotalLocked), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + // Update model so that we know this stacker has increased the stacked amount. + // Update locked and unlocked fields in the model. + stackerWallet.amountLocked = newTotalLocked; + stackerWallet.amountUnlocked = stackerWallet.amountUnlocked - + this.increaseBy; + operatorWallet.amountToCommit += this.increaseBy; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ó¾ ${this.stacker.label}`, + "delegate-stack-increase", + "increased by", + this.increaseBy.toString(), + "previously locked", + prevLocked.toString(), + "total locked", + stackerWallet.amountLocked.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-increase by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts new file mode 100644 index 00000000000..fe33805264f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts @@ -0,0 +1,95 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackIncreaseCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly increaseBy: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackIncreaseCommand_Err` to increase the uSTX amount + * previously locked on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + stacker: Wallet, + increaseBy: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.increaseBy = increaseBy; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const prevLocked = stackerWallet.amountLocked; + // Act + const delegateStackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-increase", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (increase-by uint) + Cl.uint(this.increaseBy), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. 
+ logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ó¾ ${this.stacker.label}`, + "delegate-stack-increase", + "increased by", + this.increaseBy.toString(), + "previously locked", + prevLocked.toString(), + "total locked", + stackerWallet.amountLocked.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-increase by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts new file mode 100644 index 00000000000..70f56fc1914 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -0,0 +1,198 @@ +import { + isAmountAboveThreshold, + isAmountWithinBalance, + isAmountWithinDelegationLimit, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `DelegateStackStxCommand` locks STX for stacking within PoX-4 on behalf + * of a delegator. This operation allows the `operator` to stack the `stacker`'s + * STX. + * + * Constraints for running this command include: + * - A minimum threshold of uSTX must be met, determined by the + * `get-stacking-minimum` function at the time of this call. + * - The Stacker cannot currently be engaged in another stacking operation. + * - The Stacker has to currently be delegating to the Operator. + * - The stacked uSTX amount should be less than or equal to the delegated + * amount. + * - The stacked uSTX amount should be less than or equal to the Stacker's + * balance. + * - The stacked uSTX amount should be greater than or equal to the minimum + * threshold of uSTX. + * - The Operator has to currently be delegated by the Stacker. + * - The Period has to fit the last delegation burn block height. + */ +export class DelegateStackStxCommand implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly period: number; + readonly amountUstx: bigint; + readonly unlockBurnHt: number; + + /** + * Constructs a `DelegateStackStxCommand` to lock uSTX as a Pool Operator + * on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param period - Number of reward cycles to lock uSTX. + * @param amountUstx - The uSTX amount stacked by the Operator on behalf + * of the Stacker. + * @param unlockBurnHt - The burn height at which the uSTX is unlocked. 
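+   *
+   * @example
+   * // A hypothetical instantiation (values are illustrative): lock
+   * // 1 STX (1,000,000 uSTX) for 2 reward cycles, unlocking at burn
+   * // height 3150, i.e. three 1050-block cycles past height 0.
+   * new DelegateStackStxCommand(operator, stacker, 2, 1_000_000n, 3150);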
+ */ + constructor( + operator: Wallet, + stacker: Wallet, + period: number, + amountUstx: bigint, + unlockBurnHt: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.period = period; + this.amountUstx = amountUstx; + this.unlockBurnHt = unlockBurnHt; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - A minimum threshold of uSTX must be met, determined by the + // `get-stacking-minimum` function at the time of this call. + // - The Stacker cannot currently be engaged in another stacking + // operation. + // - The Stacker has to currently be delegating to the Operator. + // - The stacked uSTX amount should be less than or equal to the + // delegated amount. + // - The stacked uSTX amount should be less than or equal to the + // Stacker's balance. + // - The stacked uSTX amount should be greater than or equal to the + // minimum threshold of uSTX. + // - The Operator has to currently be delegated by the Stacker. + // - The Period has to fit the last delegation burn block height. + + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + return ( + isStackingMinimumCalculated(model) && + !isStacking(stackerWallet) && + isDelegating(stackerWallet) && + isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) && + isAmountWithinBalance(stackerWallet, this.amountUstx) && + isAmountAboveThreshold(model, this.amountUstx) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + const delegateStackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-stx", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (amount-ustx uint) + Cl.uint(this.amountUstx), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + ], + this.operator.stxAddress, + ); + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.operator.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.operator.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(delegateStackStx.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.stacker.stxAddress), + "lock-amount": Cl.uint(this.amountUstx), + "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)), + }), + ); + + // Get the Stacker's wallet from the model and update it with the new state. + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + // Update model so that we know this wallet is stacking. This is important + // in order to prevent the test from stacking multiple times with the same + // address. 
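+    // Note: stacking takes effect starting with the next reward cycle, which
+    // is why `firstLockedRewardCycle` is set to `currentCycle + 1` below.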
+    stackerWallet.isStacking = true;
+    // Update locked, unlocked, and unlock-height fields in the model.
+    stackerWallet.amountLocked = Number(this.amountUstx);
+    stackerWallet.unlockHeight = Number(unlockBurnHeight.value);
+    stackerWallet.amountUnlocked -= Number(this.amountUstx);
+    stackerWallet.firstLockedRewardCycle = currentCycle(real.network) + 1;
+    // Add the stacker to the operator's lock list. This will help with
+    // knowing that the stacker's funds are locked when calling
+    // delegate-stack-extend and delegate-stack-increase.
+    operatorWallet.lockedAddresses.push(this.stacker.stxAddress);
+    operatorWallet.amountToCommit += Number(this.amountUstx);
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.operator.label} Ӿ ${this.stacker.label}`,
+      "delegate-stack-stx",
+      "lock-amount",
+      this.amountUstx.toString(),
+      "until",
+      stackerWallet.unlockHeight.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.operator.label} delegate-stack-stx stacker ${this.stacker.label} period ${this.period}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts
new file mode 100644
index 00000000000..fdec28a3558
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts
@@ -0,0 +1,105 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { poxAddressToTuple } from "@stacks/stacking";
+import { expect } from "vitest";
+import { Cl, ClarityValue, cvToValue } from "@stacks/transactions";
+
+type CheckFunc = (
+  this: DelegateStackStxCommand_Err,
+  model: Readonly<Stub>,
+) => boolean;
+
+export class DelegateStackStxCommand_Err implements PoxCommand {
+  readonly operator: Wallet;
+  readonly stacker: Wallet;
+  readonly period: number;
+  readonly amountUstx: bigint;
+  readonly unlockBurnHt: number;
+  readonly checkFunc: CheckFunc;
+  readonly errorCode: number;
+
+  /**
+   * Constructs a `DelegateStackStxCommand_Err` to lock uSTX as a Pool
+   * Operator on behalf of a Stacker.
+   *
+   * @param operator - Represents the Pool Operator's wallet.
+   * @param stacker - Represents the Stacker's wallet.
+   * @param period - Number of reward cycles to lock uSTX.
+   * @param amountUstx - The uSTX amount stacked by the Operator on behalf
+   *   of the Stacker.
+   * @param unlockBurnHt - The burn height at which the uSTX is unlocked.
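+   * @param checkFunc - A function to check constraints for running this command.
+   * @param errorCode - The expected error code when running this command.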
+ */ + constructor( + operator: Wallet, + stacker: Wallet, + period: number, + amountUstx: bigint, + unlockBurnHt: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.period = period; + this.amountUstx = amountUstx; + this.unlockBurnHt = unlockBurnHt; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + const delegateStackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-stx", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (amount-ustx uint) + Cl.uint(this.amountUstx), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ó¾ ${this.stacker.label}`, + "delegate-stack-stx", + "lock-amount", + this.amountUstx.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-stx stacker ${this.stacker.label} period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts new file mode 100644 index 00000000000..cd14c39ce30 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -0,0 +1,136 @@ +import { + isDelegating, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { + boolCV, + Cl, + ClarityType, + cvToValue, + isClarityType, + OptionalCV, + UIntCV, +} from "@stacks/transactions"; + +/** + * The `DelegateStxCommand` delegates STX for stacking within PoX-4. This + * operation allows the `tx-sender` (the `wallet` in this case) to delegate + * stacking participation to a `delegatee`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be a delegator in another delegation. + */ +export class DelegateStxCommand implements PoxCommand { + readonly wallet: Wallet; + readonly delegateTo: Wallet; + readonly untilBurnHt: OptionalCV; + readonly amount: bigint; + + /** + * Constructs a `DelegateStxCommand` to delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param delegateTo - Represents the Delegatee's STX address. 
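+   *   (a `Wallet` whose `btcAddress` is also recorded as the delegation's
+   *   PoX reward address).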
+ * @param untilBurnHt - The burn block height until the delegation is valid. + * @param amount - The maximum amount the `Stacker` delegates the `Delegatee` + * to stack on his behalf. + */ + constructor( + wallet: Wallet, + delegateTo: Wallet, + untilBurnHt: OptionalCV, + amount: bigint, + ) { + this.wallet = wallet; + this.delegateTo = delegateTo; + this.untilBurnHt = untilBurnHt; + this.amount = amount; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker cannot currently be a delegator in another delegation. + const stackerWallet = model.stackers.get(this.wallet.stxAddress)!; + + return ( + isStackingMinimumCalculated(model) && + !isDelegating(stackerWallet) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + // The amount of uSTX delegated by the Stacker to the Delegatee. + // Even if there are no constraints about the delegated amount, + // it will be checked in the future, when calling delegate-stack-stx. + const amountUstx = Number(this.amount); + + // Act + const delegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (delegate-to principal) + Cl.principal(this.delegateTo.stxAddress), + // (until-burn-ht (optional uint)) + this.untilBurnHt, + // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) + Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), + ], + this.wallet.stxAddress, + ); + + // Assert + expect(delegateStx.result).toBeOk(boolCV(true)); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + const delegatedWallet = model.stackers.get(this.delegateTo.stxAddress)!; + // Update model so that we know this wallet has delegated. This is important + // in order to prevent the test from delegating multiple times with the same + // address. + wallet.hasDelegated = true; + wallet.delegatedTo = this.delegateTo.stxAddress; + wallet.delegatedMaxAmount = amountUstx; + wallet.delegatedUntilBurnHt = + isClarityType(this.untilBurnHt, ClarityType.OptionalNone) + ? undefined + : Number(cvToValue(this.untilBurnHt).value); + wallet.delegatedPoxAddress = this.delegateTo.btcAddress; + + delegatedWallet.poolMembers.push(this.wallet.stxAddress); + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "delegate-stx", + "amount", + amountUstx.toString(), + "delegated to", + this.delegateTo.label, + "until", + this.untilBurnHt.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} delegate-stx to ${this.delegateTo.label} until burn ht ${this.untilBurnHt}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts new file mode 100644 index 00000000000..138d99265fa --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts @@ -0,0 +1,104 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly delegateTo: Wallet; + readonly untilBurnHt: number; + readonly amount: bigint; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStxCommand_Err` to delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param delegateTo - Represents the Delegatee's STX address. + * @param untilBurnHt - The burn block height until the delegation is valid. + * @param amount - The maximum amount the `Stacker` delegates the `Delegatee` + * to stack on his behalf. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + delegateTo: Wallet, + untilBurnHt: number, + amount: bigint, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.delegateTo = delegateTo; + this.untilBurnHt = untilBurnHt; + this.amount = amount; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // The amount of uSTX delegated by the Stacker to the Delegatee. + // Even if there are no constraints about the delegated amount, + // it will be checked in the future, when calling delegate-stack-stx. + const amountUstx = Number(this.amount); + + // Act + const delegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (delegate-to principal) + Cl.principal(this.delegateTo.stxAddress), + // (until-burn-ht (optional uint)) + Cl.some(Cl.uint(this.untilBurnHt)), + // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) + Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), + ], + this.wallet.stxAddress, + ); + + // Assert + expect(delegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "delegate-stx", + "amount", + amountUstx.toString(), + "delegated to", + this.delegateTo.label, + "until", + this.untilBurnHt.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. 
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} delegate-stx to ${this.delegateTo.label} until burn ht ${this.untilBurnHt}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts
new file mode 100644
index 00000000000..6108a5973f1
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts
@@ -0,0 +1,113 @@
+import {
+  isAllowedContractCaller,
+  isCallerAllowedByStacker,
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { expect } from "vitest";
+import { boolCV, Cl } from "@stacks/transactions";
+
+/**
+ * The `DisallowContractCallerCommand` revokes a `contract-caller`'s
+ * authorization to call stacking methods.
+ *
+ * Constraints for running this command include:
+ * - The Caller to be disallowed must have been previously
+ *   allowed by the Operator.
+ */
+export class DisallowContractCallerCommand implements PoxCommand {
+  readonly stacker: Wallet;
+  readonly callerToDisallow: Wallet;
+
+  /**
+   * Constructs a `DisallowContractCallerCommand` to revoke authorization
+   * for calling stacking methods.
+   *
+   * @param stacker - Represents the `Stacker`'s wallet.
+   * @param callerToDisallow - The `contract-caller` to be revoked.
+   */
+  constructor(stacker: Wallet, callerToDisallow: Wallet) {
+    this.stacker = stacker;
+    this.callerToDisallow = callerToDisallow;
+  }
+
+  check(model: Readonly<Stub>): boolean {
+    // Constraints for running this command include:
+    // - The Caller to be disallowed must have been previously allowed
+    //   by the Operator.
+
+    const stacker = model.stackers.get(this.stacker.stxAddress)!;
+    const callerToDisallow = model.stackers.get(
+      this.callerToDisallow.stxAddress,
+    )!;
+
+    return (
+      isAllowedContractCaller(stacker, this.callerToDisallow) &&
+      isCallerAllowedByStacker(this.stacker, callerToDisallow)
+    );
+  }
+
+  run(model: Stub, real: Real): void {
+    model.trackCommandRun(this.constructor.name);
+
+    // Act
+    const disallowContractCaller = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "disallow-contract-caller",
+      [
+        // (caller principal)
+        Cl.principal(this.callerToDisallow.stxAddress),
+      ],
+      this.stacker.stxAddress,
+    );
+
+    // Assert
+    expect(disallowContractCaller.result).toBeOk(boolCV(true));
+
+    // Get the wallet to be revoked stacking rights from the model and
+    // update it with the new state.
+    const callerToDisallow = model.stackers.get(
+      this.callerToDisallow.stxAddress,
+    )!;
+
+    // Update model so that we know that the stacker has revoked stacking
+    // allowance.
+    const stacker = model.stackers.get(this.stacker.stxAddress)!;
+    const callerToDisallowIndex = stacker.allowedContractCallers.indexOf(
+      this.callerToDisallow.stxAddress,
+    );
+
+    expect(callerToDisallowIndex).toBeGreaterThan(-1);
+    stacker.allowedContractCallers.splice(callerToDisallowIndex, 1);
+
+    // Remove the stacker from the disallowed caller's `callerAllowedBy` list.
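+    // The model mirrors the contract's two-sided bookkeeping: the stacker's
+    // `allowedContractCallers` list and the disallowed caller's
+    // `callerAllowedBy` list are kept in sync.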
+    const walletIndexAllowedByList = callerToDisallow.callerAllowedBy.indexOf(
+      this.stacker.stxAddress,
+    );
+
+    expect(walletIndexAllowedByList).toBeGreaterThan(-1);
+    callerToDisallow.callerAllowedBy.splice(walletIndexAllowedByList, 1);
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.stacker.label}`,
+      "disallow-contract-caller",
+      this.callerToDisallow.label,
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.stacker.label} disallow-contract-caller ${this.callerToDisallow.label}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts
new file mode 100644
index 00000000000..028457b41e4
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts
@@ -0,0 +1,73 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { expect } from "vitest";
+import { Cl } from "@stacks/transactions";
+
+type CheckFunc = (
+  this: DisallowContractCallerCommand_Err,
+  model: Readonly<Stub>,
+) => boolean;
+
+export class DisallowContractCallerCommand_Err implements PoxCommand {
+  readonly stacker: Wallet;
+  readonly callerToDisallow: Wallet;
+  readonly checkFunc: CheckFunc;
+
+  /**
+   * Constructs a `DisallowContractCallerCommand_Err` to revoke authorization
+   * for calling stacking methods.
+   *
+   * @param stacker - Represents the `Stacker`'s wallet.
+   * @param callerToDisallow - The `contract-caller` to be revoked.
+   * @param checkFunc - A function to check constraints for running this command.
+   */
+  constructor(stacker: Wallet, callerToDisallow: Wallet, checkFunc: CheckFunc) {
+    this.stacker = stacker;
+    this.callerToDisallow = callerToDisallow;
+    this.checkFunc = checkFunc;
+  }
+
+  // Constraints for running this command include:
+  // - The Caller to be disallowed must have been previously allowed
+  //   by the Operator.
+  check = (model: Readonly<Stub>): boolean => this.checkFunc.call(this, model);
+
+  run(model: Stub, real: Real): void {
+    // Act
+    const disallowContractCaller = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "disallow-contract-caller",
+      [
+        // (caller principal)
+        Cl.principal(this.callerToDisallow.stxAddress),
+      ],
+      this.stacker.stxAddress,
+    );
+
+    // Assert
+    expect(disallowContractCaller.result).toBeOk(Cl.bool(false));
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✗ ${this.stacker.label}`,
+      "disallow-contract-caller",
+      this.callerToDisallow.label,
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.stacker.label} disallow-contract-caller ${this.callerToDisallow.label}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts
new file mode 100644
index 00000000000..50dd7bf16c0
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStackingMinimumCommand.ts
@@ -0,0 +1,70 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { assert } from "vitest";
+import { ClarityType, isClarityType } from "@stacks/transactions";
+
+/**
+ * Implements the `PoxCommand` interface to get the minimum stacking amount
+ * required for a given reward cycle.
+ */
+export class GetStackingMinimumCommand implements PoxCommand {
+  readonly wallet: Wallet;
+
+  /**
+   * Constructs a new `GetStackingMinimumCommand`.
+   *
+   * @param wallet The wallet information, including the STX address used to
+   *   query the stacking minimum requirement.
+   */
+  constructor(wallet: Wallet) {
+    this.wallet = wallet;
+  }
+
+  check(_model: Readonly<Stub>): boolean {
+    // There are no constraints for running this command.
+    return true;
+  }
+
+  run(model: Stub, real: Real): void {
+    model.trackCommandRun(this.constructor.name);
+
+    // Act
+    const { result: stackingMinimum } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "get-stacking-minimum",
+      [],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(stackingMinimum, ClarityType.UInt));
+
+    // Update the model with the new stacking minimum. This is important for
+    // the `check` method of the `StackStxCommand` class to work correctly, as
+    // well as for other tests that may depend on the stacking minimum.
+    model.stackingMinimum = Number(stackingMinimum.value);
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.wallet.label}`,
+      "get-stacking-minimum",
+      "pox-4",
+      stackingMinimum.value.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} get-stacking-minimum`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts
new file mode 100644
index 00000000000..60d8ff38b28
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_GetStxAccountCommand.ts
@@ -0,0 +1,72 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { expect } from "vitest";
+import { Cl } from "@stacks/transactions";
+
+/**
+ * Implements the `PoxCommand` interface to get the info returned by
+ * `stx-account`.
+ */
+export class GetStxAccountCommand implements PoxCommand {
+  readonly wallet: Wallet;
+
+  /**
+   * Constructs a new `GetStxAccountCommand`.
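+   * The command cross-checks the model's view of the wallet's locked and
+   * unlocked balances, and its unlock height, against the on-chain
+   * `stx-account` tuple.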
+ * + * @param wallet The wallet information, including the STX address used to + * query the `stx-account`. + */ + constructor(wallet: Wallet) { + this.wallet = wallet; + } + + check(_model: Readonly): boolean { + // There are no constraints for running this command. + return true; + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const actual = model.stackers.get(this.wallet.stxAddress)!; + expect(real.network.runSnippet(`(stx-account '${this.wallet.stxAddress})`)) + .toBeTuple({ + "locked": Cl.uint(actual.amountLocked), + "unlocked": Cl.uint(actual.amountUnlocked), + "unlock-height": Cl.uint(actual.unlockHeight), + }); + + expect(actual.amountLocked + actual.amountUnlocked).toBe( + actual.ustxBalance, + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stx-account", + "lock-amount", + actual.amountLocked.toString(), + "unlocked-amount", + actual.amountUnlocked.toString(), + "unlocked-height", + actual.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stx-account`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts new file mode 100644 index 00000000000..98e2349a1fe --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -0,0 +1,119 @@ +import { + isDelegating, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, someCV, tupleCV } from "@stacks/transactions"; + +/** + * The `RevokeDelegateStxCommand` revokes the delegation for stacking within + * PoX-4. + * + * Constraints for running this command include: + * - The `Stacker` has to currently be delegating. + * - The `Stacker`'s delegation must not be expired. + */ +export class RevokeDelegateStxCommand implements PoxCommand { + readonly wallet: Wallet; + + /** + * Constructs a `RevokeDelegateStxCommand` to revoke a stacking delegation. + * + * @param wallet - Represents the Stacker's wallet. + */ + constructor(wallet: Wallet) { + this.wallet = wallet; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker has to currently be delegating. + // - The Stacker's delegation must not be expired. 
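+    // Note: `isUBHWithinDelegationLimit` treats an undefined
+    // `delegatedUntilBurnHt` as an indefinite delegation, so such
+    // delegations never expire and can always be revoked here.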
+ const stacker = model.stackers.get(this.wallet.stxAddress)!; + + return ( + isStackingMinimumCalculated(model) && + isDelegating(stacker) && + isUBHWithinDelegationLimit(stacker, model.burnBlockHeight) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + + const wallet = model.stackers.get(this.wallet.stxAddress)!; + const operatorWallet = model.stackers.get(wallet.delegatedTo)!; + const expectedUntilBurnHt = wallet.delegatedUntilBurnHt === undefined + ? Cl.none() + : Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)); + + // Act + const revokeDelegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "revoke-delegate-stx", + [], + this.wallet.stxAddress, + ); + + // Assert + expect(revokeDelegateStx.result).toBeOk( + someCV( + tupleCV({ + "amount-ustx": Cl.uint(wallet.delegatedMaxAmount), + "delegated-to": Cl.principal( + model.stackers.get(this.wallet.stxAddress)!.delegatedTo || "", + ), + "pox-addr": Cl.some( + poxAddressToTuple(wallet.delegatedPoxAddress || ""), + ), + "until-burn-ht": expectedUntilBurnHt, + }), + ), + ); + + // Get the Stacker's wallet from the model and update the two wallets + // involved with the new state. + // Update model so that we know this wallet is not delegating anymore. + // This is important in order to prevent the test from revoking the + // delegation multiple times with the same address. + // We update delegatedUntilBurnHt to 0, and not undefined. Undefined + // stands for indefinite delegation. + wallet.hasDelegated = false; + wallet.delegatedTo = ""; + wallet.delegatedUntilBurnHt = 0; + wallet.delegatedMaxAmount = 0; + wallet.delegatedPoxAddress = ""; + + // Remove the Stacker from the Pool Operator's pool members list. + const walletIndexInDelegatorsList = operatorWallet.poolMembers.indexOf( + this.wallet.stxAddress, + ); + expect(walletIndexInDelegatorsList).toBeGreaterThan(-1); + operatorWallet.poolMembers.splice(walletIndexInDelegatorsList, 1); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "revoke-delegate-stx", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} revoke-delegate-stx`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts new file mode 100644 index 00000000000..a7a4cb0a6e5 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts @@ -0,0 +1,66 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: RevokeDelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class RevokeDelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `RevokeDelegateStxCommand_Err` to revoke a stacking delegation. + * + * @param wallet - Represents the Stacker's wallet. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor(wallet: Wallet, checkFunc: CheckFunc, errorCode: number) { + this.wallet = wallet; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // Act + const revokeDelegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "revoke-delegate-stx", + [], + this.wallet.stxAddress, + ); + + // Assert + expect(revokeDelegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "revoke-delegate-stx", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} revoke-delegate-stx`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts new file mode 100644 index 00000000000..7145c673d41 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -0,0 +1,150 @@ +import { + hasLockedStackers, + isATCAboveThreshold, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +/** + * The `StackAggregationCommitAuthCommand` allows an operator to commit + * partially stacked STX & to allocate a new PoX reward address slot. 
+ * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls stack-aggregation-commit using an `authorization`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitAuthCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand` to commit partially + * locked uSTX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Act + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. 
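+    // Both transactions below target `currentRewCycle + 1`, the cycle the
+    // partially stacked funds are being committed for: the authorization's
+    // reward-cycle must match the reward-cycle argument passed to
+    // `stack-aggregation-commit`.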
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk(Cl.bool(true)); + + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts new file mode 100644 index 00000000000..ddc986f1a42 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts @@ -0,0 +1,128 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitAuthCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand_Err` to commit partially + * locked uSTX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. 
+ * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts new file mode 100644 index 00000000000..ba9679e639f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -0,0 +1,155 @@ +import { + hasLockedStackers, + isATCAboveThreshold, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +/** + * The `StackAggregationCommitIndexedAuthCommand` allows an operator to + * commit partially stacked STX & to allocate a new PoX reward address + * slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit-indexed` using an + * `authorization`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitIndexedAuthCommand` to commit partially + * locked uSTX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Act + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. 
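+    // Same two-transaction pattern as `StackAggregationCommitAuthCommand`;
+    // the indexed variant differs in its return value, which on success is
+    // the reward set index of the new entry rather than `true` (see the
+    // assertion on `block[1].result` below).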
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( + Cl.uint(model.nextRewardSetIndex), + ); + + // Update the model + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts new file mode 100644 index 00000000000..1c891df2700 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedAuthCommand_Err + implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedAuthCommand_Err` to commit partially + * locked uSTX. 
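+   *
+   * When `checkFunc` holds, the command runs and expects the second
+   * transaction in the block (the commit itself) to fail with `errorCode`.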
+ * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Act + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr( + Cl.int(this.errorCode), + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts new file mode 100644 index 00000000000..beb91ea87f9 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts @@ -0,0 +1,147 @@ +import { + hasLockedStackers, + isATCAboveThreshold, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitIndexedSigCommand` allows an operator to + * commit partially stacked STX & to allocate a new PoX reward address + * slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. The "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit-indexed` using a + * `signature`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitIndexedSigCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitIndexedSigCommand` to commit partially + * locked uSTX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. 
For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommitIndexed = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommitIndexed.result).toBeOk( + Cl.uint(model.nextRewardSetIndex), + ); + + // Update the model + operatorWallet.amountToCommit -= committedAmount; + operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts new file mode 100644 index 00000000000..045142f3b42 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts @@ -0,0 +1,124 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedSigCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedSigCommand_Err` to commit partially + * locked uSTX. 
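+   *
+   * When `checkFunc` holds, the command runs and expects the
+   * `stack-aggregation-commit-indexed` call to be rejected with `errorCode`.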
+   *
+   * @param operator - Represents the `Operator`'s wallet.
+   * @param authId - Unique `auth-id` for the authorization.
+   * @param checkFunc - A function to check constraints for running this command.
+   * @param errorCode - The expected error code when running this command.
+   */
+  constructor(
+    operator: Wallet,
+    authId: number,
+    checkFunc: CheckFunc,
+    errorCode: number,
+  ) {
+    this.operator = operator;
+    this.authId = authId;
+    this.checkFunc = checkFunc;
+    this.errorCode = errorCode;
+  }
+
+  check = (model: Readonly<Stub>): boolean => this.checkFunc.call(this, model);
+
+  run(model: Stub, real: Real): void {
+    const currentRewCycle = currentCycle(real.network);
+    const operatorWallet = model.stackers.get(this.operator.stxAddress)!;
+    const committedAmount = operatorWallet.amountToCommit;
+
+    const signerSig = this.operator.stackingClient.signPoxSignature({
+      // The signer key being authorized.
+      signerPrivateKey: this.operator.signerPrvKey,
+      // The reward cycle for which the authorization is valid.
+      // For stack-stx and stack-extend, this refers to the reward cycle
+      // where the transaction is confirmed. For stack-aggregation-commit,
+      // this refers to the reward cycle argument in that function.
+      rewardCycle: currentRewCycle + 1,
+      // For stack-stx, this refers to lock-period. For stack-extend,
+      // this refers to extend-count. For stack-aggregation-commit, this is
+      // u1.
+      period: 1,
+      // A string representing the function where this authorization is valid.
+      // Either stack-stx, stack-extend, stack-increase or agg-commit.
+      topic: Pox4SignatureTopic.AggregateCommit,
+      // The PoX address that can be used with this signer key.
+      poxAddress: this.operator.btcAddress,
+      // The unique auth-id for this authorization.
+      authId: this.authId,
+      // The maximum amount of uSTX that can be used (per tx) with this signer
+      // key.
+      maxAmount: committedAmount,
+    });
+
+    // Act
+    const stackAggregationCommitIndexed = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stack-aggregation-commit-indexed",
+      [
+        // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+        poxAddressToTuple(this.operator.btcAddress),
+        // (reward-cycle uint)
+        Cl.uint(currentRewCycle + 1),
+        // (signer-sig (optional (buff 65)))
+        Cl.some(bufferFromHex(signerSig)),
+        // (signer-key (buff 33))
+        Cl.bufferFromHex(this.operator.signerPubKey),
+        // (max-amount uint)
+        Cl.uint(committedAmount),
+        // (auth-id uint)
+        Cl.uint(this.authId),
+      ],
+      this.operator.stxAddress,
+    );
+
+    // Assert
+    expect(stackAggregationCommitIndexed.result).toBeErr(
+      Cl.int(this.errorCode),
+    );
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✗ ${this.operator.label}`,
+      "stack-agg-commit-indexed",
+      "amount committed",
+      committedAmount.toString(),
+      "signature",
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts new file mode 100644 index 00000000000..9e6bfd1bde6 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts @@ -0,0 +1,142 @@ +import { + hasLockedStackers, + isATCAboveThreshold, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationCommitSigCommand` allows an operator to commit + * partially stacked STX & to allocate a new PoX reward address slot. + * This allows a stacker to lock fewer STX than the minimal threshold + * in multiple transactions, so long as: + * 1. The pox-addr is the same. + * 2. This "commit" transaction is called _before_ the PoX anchor block. + * + * This command calls `stack-aggregation-commit` using a `signature`. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The total amount previously locked by the Operator on behalf of the + * stackers has to be greater than the uSTX threshold. + */ +export class StackAggregationCommitSigCommand implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + + /** + * Constructs a `StackAggregationCommitSigCommand` to commit partially + * locked uSTX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + authId: number, + ) { + this.operator = operator; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The total amount previously locked by the Operator on behalf of the + // stackers has to be greater than the uSTX threshold. + + const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. 
For stack-aggregation-commit, this is
+      // u1.
+      period: 1,
+      // A string representing the function where this authorization is valid.
+      // Either stack-stx, stack-extend, stack-increase or agg-commit.
+      topic: Pox4SignatureTopic.AggregateCommit,
+      // The PoX address that can be used with this signer key.
+      poxAddress: this.operator.btcAddress,
+      // The unique auth-id for this authorization.
+      authId: this.authId,
+      // The maximum amount of uSTX that can be used (per tx) with this signer
+      // key.
+      maxAmount: committedAmount,
+    });
+
+    // Act
+    const stackAggregationCommit = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stack-aggregation-commit",
+      [
+        // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+        poxAddressToTuple(this.operator.btcAddress),
+        // (reward-cycle uint)
+        Cl.uint(currentRewCycle + 1),
+        // (signer-sig (optional (buff 65)))
+        Cl.some(bufferFromHex(signerSig)),
+        // (signer-key (buff 33))
+        Cl.bufferFromHex(this.operator.signerPubKey),
+        // (max-amount uint)
+        Cl.uint(committedAmount),
+        // (auth-id uint)
+        Cl.uint(this.authId),
+      ],
+      this.operator.stxAddress,
+    );
+
+    // Assert
+    expect(stackAggregationCommit.result).toBeOk(Cl.bool(true));
+
+    operatorWallet.amountToCommit -= committedAmount;
+    operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex);
+    model.nextRewardSetIndex++;
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.operator.label}`,
+      "stack-agg-commit",
+      "amount committed",
+      committedAmount.toString(),
+      "signature",
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts
new file mode 100644
index 00000000000..1238a4f32b9
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts
@@ -0,0 +1,122 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking";
+import { expect } from "vitest";
+import { Cl } from "@stacks/transactions";
+import { bufferFromHex } from "@stacks/transactions/dist/cl";
+import { currentCycle } from "./pox_Commands.ts";
+
+type CheckFunc = (
+  this: StackAggregationCommitSigCommand_Err,
+  model: Readonly<Stub>,
+) => boolean;
+
+export class StackAggregationCommitSigCommand_Err implements PoxCommand {
+  readonly operator: Wallet;
+  readonly authId: number;
+  readonly checkFunc: CheckFunc;
+  readonly errorCode: number;
+
+  /**
+   * Constructs a `StackAggregationCommitSigCommand_Err` to commit partially
+   * locked uSTX.
+   *
+   * @param operator - Represents the `Operator`'s wallet.
+   * @param authId - Unique `auth-id` for the authorization.
+   * @param checkFunc - A function to check constraints for running this command.
+ * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommit = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommit.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts new file mode 100644 index 00000000000..80e1950abbf --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts @@ -0,0 +1,162 @@ +import { + hasLockedStackers, + isATCPositive, + isPositive, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackAggregationIncreaseCommand` allows an operator to commit + * partially stacked STX to a PoX address which has already received + * some STX (more than the `stacking minimum`). + * This allows a delegator to lock up marginally more STX from new + * delegates, even if they collectively do not exceed the Stacking + * minimum, so long as the target PoX address already represents at + * least as many STX as the `stacking minimum`. + * This command calls stack-aggregation-increase. + * + * Constraints for running this command include: + * - The Operator must have locked STX on behalf of at least one stacker. + * - The PoX address must have partial committed STX. + * - The Reward Cycle Index must be positive. + */ +export class StackAggregationIncreaseCommand implements PoxCommand { + readonly operator: Wallet; + readonly rewardCycleIndex: number; + readonly authId: number; + + /** + * Constructs a `StackAggregationIncreaseCommand` to commit partially + * stacked STX to a PoX address which has already received some STX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param rewardCycleIndex - The cycle index to increase the commit for. + * @param authId - Unique `auth-id` for the authorization. + */ + constructor( + operator: Wallet, + rewardCycleIndex: number, + authId: number, + ) { + this.operator = operator; + this.rewardCycleIndex = rewardCycleIndex; + this.authId = authId; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Operator must have locked STX on behalf of at least one stacker. + // - The PoX address must have partial committed STX. + // - The Reward Cycle Index must be positive. 
+ const operator = model.stackers.get(this.operator.stxAddress)!; + return ( + hasLockedStackers(operator) && + isPositive(this.rewardCycleIndex) && + isATCPositive(operator) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const existingEntryCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-pox-address-list", + Cl.tuple({ + "index": Cl.uint(this.rewardCycleIndex), + "reward-cycle": Cl.uint(currentRewCycle + 1), + }), + ); + + const totalStackedBefore = + cvToJSON(existingEntryCV).value.value["total-ustx"].value; + const maxAmount = committedAmount + Number(totalStackedBefore); + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase, agg-commit or agg-increase. + topic: Pox4SignatureTopic.AggregateIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // Act + const stackAggregationIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-increase", + [ + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (reward-cycle-index uint)) + Cl.uint(this.rewardCycleIndex), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationIncrease.result).toBeOk(Cl.bool(true)); + + operatorWallet.amountToCommit -= committedAmount; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label}`, + "stack-agg-increase", + "amount committed", + committedAmount.toString(), + "cycle index", + this.rewardCycleIndex.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-increase for index ${this.rewardCycleIndex}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts new file mode 100644 index 00000000000..26fc49eb608 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts @@ -0,0 +1,143 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationIncreaseCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly rewardCycleIndex: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationIncreaseCommand_Err` to commit partially + * stacked STX to a PoX address which has already received some STX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param rewardCycleIndex - The cycle index to increase the commit for. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + rewardCycleIndex: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.rewardCycleIndex = rewardCycleIndex; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const existingEntryCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-pox-address-list", + Cl.tuple({ + index: Cl.uint(this.rewardCycleIndex), + "reward-cycle": Cl.uint(currentRewCycle + 1), + }), + ); + + const totalStackedBefore = + cvToJSON(existingEntryCV).value.value["total-ustx"].value; + const maxAmount = committedAmount + Number(totalStackedBefore); + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. 
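+      // In these tests stack-aggregation-increase likewise uses a fixed
+      // period of u1.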
+ period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase, agg-commit or agg-increase. + topic: Pox4SignatureTopic.AggregateIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // Act + const stackAggregationIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-increase", + [ + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (reward-cycle-index uint)) + Cl.uint(this.rewardCycleIndex), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-increase", + "amount committed", + committedAmount.toString(), + "cycle index", + this.rewardCycleIndex.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-increase for index ${this.rewardCycleIndex}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts new file mode 100644 index 00000000000..203bef86b93 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -0,0 +1,198 @@ +import { poxAddressToTuple } from "@stacks/stacking"; +import { + hasPoolMembers, + isAmountLockedPositive, + isPeriodWithinMax, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; +import { + currentCycle, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { assert, expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +export class StackExtendAuthCommand implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + + /** + * Constructs a `StackExtendAuthCommand` to extend an active stacking lock. + * + * This command calls `stack-extend` using an `authorization`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. 
+ * @param currentCycle - Represents the current PoX reward cycle. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The new lock period must be less than or equal to 12. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The new lock period must be less than or equal to 12. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + + return ( + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + !hasPoolMembers(stacker) && + isPeriodWithinMax(totalPeriod) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stacker.unlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + + const newUnlockHeight = extendedUnlockHeight.value; + + // Include the authorization and the `stack-extend` transactions in a single + // block. This way we ensure both the authorization and the stack-extend + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-extend call. 
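+    // Worked example for the unlock-height math above (illustrative numbers,
+    // assuming FIRST_BURNCHAIN_BLOCK_HEIGHT = 0 and REWARD_CYCLE_LENGTH = 1050
+    // in the simnet constants): an unlockHeight of 4200 gives
+    // firstExtendCycle = 4; with extendCount = 2, lastExtendCycle = 5, so
+    // newUnlockHeight is the burn height opening cycle 6, i.e. 6300.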
+ const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's unlock height was extended. + wallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-extend-auth", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend auth extend-count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts new file mode 100644 index 00000000000..46b8ce173e1 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts @@ -0,0 +1,127 @@ +import { poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackExtendAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendAuthCommand_Err` to extend an active stacking lock. 
+ * + * This command calls `stack-extend` using an `authorization`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + // Include the authorization and the `stack-extend` transactions in a single + // block. This way we ensure both the authorization and the stack-extend + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-extend call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-extend-auth", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend auth extend-count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts new file mode 100644 index 00000000000..b937b612074 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts @@ -0,0 +1,191 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { + hasPoolMembers, + isAmountLockedPositive, + isPeriodWithinMax, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; +import { + currentCycle, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +export class StackExtendSigCommand implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + + /** + * Constructs a `StackExtendSigCommand` to extend an active stacking lock. + * + * This command calls `stack-extend` using a `signature`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * + * Constraints for running this command include: + * - The Stacker must have locked uSTX. + * - The Stacker must be stacking solo. + * - The Stacker must not have delegated to a pool. + * - The new lock period must be less than or equal to 12. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - The Stacker must have locked uSTX. + // - The Stacker must be stacking solo. + // - The Stacker must not have delegated to a pool. + // - The new lock period must be less than or equal to 12. + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + + return ( + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + !hasPoolMembers(stacker) && + isPeriodWithinMax(totalPeriod) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. 
+ signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.extendCount, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackExtend, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: stacker.amountLocked, + }); + + const stackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: firstExtendCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(stacker.unlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(firstExtendCycle, ClarityType.UInt)); + + const lastExtendCycle = Number(firstExtendCycle.value) + this.extendCount - + 1; + + const { result: extendedUnlockHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(lastExtendCycle + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(extendedUnlockHeight, ClarityType.UInt)); + + const newUnlockHeight = extendedUnlockHeight.value; + + expect(stackExtend.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(newUnlockHeight), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's unlock height was extended. + wallet.unlockHeight = Number(newUnlockHeight); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-extend-sig", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend sig extend-count ${this.extendCount}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts new file mode 100644 index 00000000000..9c37b96a601 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts @@ -0,0 +1,121 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; + +type CheckFunc = ( + this: StackExtendSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendSigCommand_Err` to extend an active stacking lock. + * + * This command calls `stack-extend` using a `signature`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.extendCount, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackExtend, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. 
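+      // Presumably capped at the stacker's current locked amount so the
+      // signature stays consistent with the stack-extend arguments below;
+      // the expected failure is driven by checkFunc/errorCode instead.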
+      maxAmount: stacker.amountLocked,
+    });
+
+    const stackExtend = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stack-extend",
+      [
+        // (extend-count uint)
+        Cl.uint(this.extendCount),
+        // (pox-addr { version: (buff 1), hashbytes: (buff 32) })
+        poxAddressToTuple(this.wallet.btcAddress),
+        // (signer-sig (optional (buff 65)))
+        Cl.some(Cl.bufferFromHex(signerSig)),
+        // (signer-key (buff 33))
+        Cl.bufferFromHex(this.wallet.signerPubKey),
+        // (max-amount uint)
+        Cl.uint(stacker.amountLocked),
+        // (auth-id uint)
+        Cl.uint(this.authId),
+      ],
+      this.wallet.stxAddress,
+    );
+
+    expect(stackExtend.result).toBeErr(Cl.int(this.errorCode));
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✗ ${this.wallet.label}`,
+      "stack-extend-sig",
+      "extend-count",
+      this.extendCount.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} stack-extend sig extend-count ${this.extendCount}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts
new file mode 100644
index 00000000000..d819a822155
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts
@@ -0,0 +1,175 @@
+import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking";
+import {
+  isAmountLockedPositive,
+  isIncreaseAmountGTZero,
+  isIncreaseByWithinUnlockedBalance,
+  isDelegating,
+  isStacking,
+  isStackingSolo,
+  isStackingMinimumCalculated,
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel";
+import { currentCycle } from "./pox_Commands";
+import { Cl, cvToJSON } from "@stacks/transactions";
+import { expect } from "vitest";
+import { tx } from "@hirosystems/clarinet-sdk";
+
+/**
+ * The `StackIncreaseAuthCommand` locks up an additional amount of the
+ * `tx-sender`'s STX, indicated by `increase-by`.
+ *
+ * This command calls `stack-increase` using an `authorization`.
+ *
+ * Constraints for running this command include:
+ * - The Stacker must have locked uSTX.
+ * - The Stacker must be stacking solo.
+ * - The Stacker must not have delegated to a pool.
+ * - The increase amount must be less than or equal to the
+ *   Stacker's unlocked uSTX amount.
+ */
+
+export class StackIncreaseAuthCommand implements PoxCommand {
+  readonly wallet: Wallet;
+  readonly increaseBy: number;
+  readonly authId: number;
+
+  /**
+   * Constructs a `StackIncreaseAuthCommand` to increase the locked uSTX amount.
+   *
+   * @param wallet - Represents the Stacker's wallet.
+   * @param increaseBy - Represents the locked amount to be increased by.
+   * @param authId - Unique auth-id for the authorization.
+   */
+  constructor(wallet: Wallet, increaseBy: number, authId: number) {
+    this.wallet = wallet;
+    this.increaseBy = increaseBy;
+    this.authId = authId;
+  }
+
+  check(model: Readonly<Stub>): boolean {
+    // Constraints for running this command include:
+    // - The Stacker must have locked uSTX.
+    // - The Stacker must be stacking solo.
+    // - The Stacker must not have delegated to a pool.
+    // - The increase amount must be less than or equal to the
+    //   Stacker's unlocked uSTX amount.
+    const stacker = model.stackers.get(this.wallet.stxAddress)!;
+
+    return (
+      isStackingMinimumCalculated(model) &&
+      isStacking(stacker) &&
+      isStackingSolo(stacker) &&
+      !isDelegating(stacker) &&
+      isAmountLockedPositive(stacker) &&
+      isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) &&
+      isIncreaseAmountGTZero(this.increaseBy)
+    );
+  }
+
+  run(model: Stub, real: Real): void {
+    model.trackCommandRun(this.constructor.name);
+
+    const currentRewCycle = currentCycle(real.network);
+    const stacker = model.stackers.get(this.wallet.stxAddress)!;
+
+    // Get the lock period from the stacking state. This will be used to
+    // issue the authorization correctly.
+    const stackingStateCV = real.network.getMapEntry(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stacking-state",
+      Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }),
+    );
+    const period = cvToJSON(stackingStateCV).value.value["lock-period"].value;
+
+    const maxAmount = stacker.amountLocked + this.increaseBy;
+
+    // Act
+
+    // Include the authorization and the `stack-increase` transactions in a single
+    // block. This way we ensure both the authorization and the stack-increase
+    // transactions are called during the same reward cycle and avoid the clarity
+    // error `ERR_INVALID_REWARD_CYCLE`.
+    const block = real.network.mineBlock([
+      tx.callPublicFn(
+        "ST000000000000000000002AMW42H.pox-4",
+        "set-signer-key-authorization",
+        [
+          // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+          poxAddressToTuple(this.wallet.btcAddress),
+          // (period uint)
+          Cl.uint(period),
+          // (reward-cycle uint)
+          Cl.uint(currentRewCycle),
+          // (topic (string-ascii 14))
+          Cl.stringAscii(Pox4SignatureTopic.StackIncrease),
+          // (signer-key (buff 33))
+          Cl.bufferFromHex(this.wallet.signerPubKey),
+          // (allowed bool)
+          Cl.bool(true),
+          // (max-amount uint)
+          Cl.uint(maxAmount),
+          // (auth-id uint)
+          Cl.uint(this.authId),
+        ],
+        this.wallet.stxAddress,
+      ),
+      tx.callPublicFn(
+        "ST000000000000000000002AMW42H.pox-4",
+        "stack-increase",
+        [
+          // (increase-by uint)
+          Cl.uint(this.increaseBy),
+          // (signer-sig (optional (buff 65)))
+          Cl.none(),
+          // (signer-key (buff 33))
+          Cl.bufferFromHex(this.wallet.signerPubKey),
+          // (max-amount uint)
+          Cl.uint(maxAmount),
+          // (auth-id uint)
+          Cl.uint(this.authId),
+        ],
+        this.wallet.stxAddress,
+      ),
+    ]);
+
+    // Assert
+    expect(block[0].result).toBeOk(Cl.bool(true));
+    expect(block[1].result).toBeOk(
+      Cl.tuple({
+        stacker: Cl.principal(this.wallet.stxAddress),
+        "total-locked": Cl.uint(stacker.amountLocked + this.increaseBy),
+      }),
+    );
+
+    // Get the wallet from the model and update it with the new state.
+    const wallet = model.stackers.get(this.wallet.stxAddress)!;
+    // Update the model so that we know this wallet's locked and unlocked
+    // amounts were adjusted.
+    wallet.amountLocked += this.increaseBy;
+    wallet.amountUnlocked -= this.increaseBy;
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.wallet.label}`,
+      "stack-increase-auth",
+      "increase-by",
+      this.increaseBy.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+ model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase auth increase-by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts new file mode 100644 index 00000000000..5722b502360 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseAuthCommand_Err` to increase the locked uSTX amount. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + // Act + + // Include the authorization and the `stack-increase` transactions in a single + // block. This way we ensure both the authorization and the stack-increase + // transactions are called during the same reward cycle and avoid the clarity + // error `ERR_INVALID_REWARD_CYCLE`. 
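+    // A hedged reading of that failure mode: were the authorization mined in
+    // cycle N and stack-increase in cycle N + 1, the recorded reward-cycle
+    // would no longer match the cycle observed by the call.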
+    const block = real.network.mineBlock([
+      tx.callPublicFn(
+        "ST000000000000000000002AMW42H.pox-4",
+        "set-signer-key-authorization",
+        [
+          // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+          poxAddressToTuple(this.wallet.btcAddress),
+          // (period uint)
+          Cl.uint(period),
+          // (reward-cycle uint)
+          Cl.uint(currentRewCycle),
+          // (topic (string-ascii 14))
+          Cl.stringAscii(Pox4SignatureTopic.StackIncrease),
+          // (signer-key (buff 33))
+          Cl.bufferFromHex(this.wallet.signerPubKey),
+          // (allowed bool)
+          Cl.bool(true),
+          // (max-amount uint)
+          Cl.uint(maxAmount),
+          // (auth-id uint)
+          Cl.uint(this.authId),
+        ],
+        this.wallet.stxAddress,
+      ),
+      tx.callPublicFn(
+        "ST000000000000000000002AMW42H.pox-4",
+        "stack-increase",
+        [
+          // (increase-by uint)
+          Cl.uint(this.increaseBy),
+          // (signer-sig (optional (buff 65)))
+          Cl.none(),
+          // (signer-key (buff 33))
+          Cl.bufferFromHex(this.wallet.signerPubKey),
+          // (max-amount uint)
+          Cl.uint(maxAmount),
+          // (auth-id uint)
+          Cl.uint(this.authId),
+        ],
+        this.wallet.stxAddress,
+      ),
+    ]);
+
+    // Assert
+    expect(block[0].result).toBeOk(Cl.bool(true));
+    expect(block[1].result).toBeErr(Cl.int(this.errorCode));
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✗ ${this.wallet.label}`,
+      "stack-increase-auth",
+      "increase-by",
+      this.increaseBy.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} stack-increase auth increase-by ${this.increaseBy}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts
new file mode 100644
index 00000000000..899be8900ec
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts
@@ -0,0 +1,185 @@
+import { Pox4SignatureTopic } from "@stacks/stacking";
+import {
+  isAmountLockedPositive,
+  isIncreaseAmountGTZero,
+  isIncreaseByWithinUnlockedBalance,
+  isDelegating,
+  isStacking,
+  isStackingSolo,
+  isStackingMinimumCalculated,
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel";
+import {
+  Cl,
+  ClarityType,
+  ClarityValue,
+  cvToJSON,
+  cvToValue,
+  isClarityType,
+} from "@stacks/transactions";
+import { assert, expect } from "vitest";
+
+/**
+ * The `StackIncreaseSigCommand` locks up an additional amount of the
+ * `tx-sender`'s STX, indicated by `increase-by`.
+ *
+ * This command calls `stack-increase` using a `signature`.
+ *
+ * Constraints for running this command include:
+ * - The Stacker must have locked uSTX.
+ * - The Stacker must be stacking solo.
+ * - The Stacker must not have delegated to a pool.
+ * - The increase amount must be less than or equal to the
+ *   Stacker's unlocked uSTX amount.
+ * - The increase amount must be greater than or equal to 1.
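+ *
+ * A minimal usage sketch (`wallet`, `model`, and `real` are assumed to come
+ * from the surrounding test harness; the numbers are illustrative):
+ *
+ * ```ts
+ * const cmd = new StackIncreaseSigCommand(wallet, 1_000_000, 42);
+ * if (cmd.check(model)) cmd.run(model, real);
+ * ```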
+ */
+export class StackIncreaseSigCommand implements PoxCommand {
+  readonly wallet: Wallet;
+  readonly increaseBy: number;
+  readonly authId: number;
+
+  /**
+   * Constructs a `StackIncreaseSigCommand` to increase the locked uSTX amount.
+   *
+   * @param wallet - Represents the Stacker's wallet.
+   * @param increaseBy - Represents the locked amount to be increased by.
+   * @param authId - Unique auth-id for the authorization.
+   */
+  constructor(wallet: Wallet, increaseBy: number, authId: number) {
+    this.wallet = wallet;
+    this.increaseBy = increaseBy;
+    this.authId = authId;
+  }
+
+  check(model: Readonly<Stub>): boolean {
+    // Constraints for running this command include:
+    // - The Stacker must have locked uSTX.
+    // - The Stacker must be stacking solo.
+    // - The Stacker must not have delegated to a pool.
+    // - The increase amount must be less than or equal to the
+    //   Stacker's unlocked uSTX amount.
+    // - The increase amount must be greater than or equal to 1.
+    const stacker = model.stackers.get(this.wallet.stxAddress)!;
+
+    return (
+      isStackingMinimumCalculated(model) &&
+      isStacking(stacker) &&
+      isStackingSolo(stacker) &&
+      !isDelegating(stacker) &&
+      isAmountLockedPositive(stacker) &&
+      isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) &&
+      isIncreaseAmountGTZero(this.increaseBy)
+    );
+  }
+
+  run(model: Stub, real: Real): void {
+    model.trackCommandRun(this.constructor.name);
+
+    const stacker = model.stackers.get(this.wallet.stxAddress)!;
+
+    const maxAmount = stacker.amountLocked + this.increaseBy;
+
+    const burnBlockHeightCV = real.network.runSnippet("burn-block-height");
+    const burnBlockHeight = Number(
+      cvToValue(burnBlockHeightCV as ClarityValue),
+    );
+
+    const { result: rewardCycleNextBlockCV } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "burn-height-to-reward-cycle",
+      [Cl.uint(burnBlockHeight + 1)],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(rewardCycleNextBlockCV, ClarityType.UInt));
+
+    const rewardCycleNextBlock = cvToValue(rewardCycleNextBlockCV);
+
+    // Get the lock period from the stacking state. This will be used to
+    // issue the authorization correctly.
+    const stackingStateCV = real.network.getMapEntry(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stacking-state",
+      Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }),
+    );
+    const period = cvToJSON(stackingStateCV).value.value["lock-period"].value;
+
+    const signerSig = this.wallet.stackingClient.signPoxSignature({
+      // The signer key being authorized.
+      signerPrivateKey: this.wallet.signerPrvKey,
+      // The reward cycle for which the authorization is valid.
+      // For `stack-stx` and `stack-extend`, this refers to the reward cycle
+      // where the transaction is confirmed. For `stack-aggregation-commit`,
+      // this refers to the reward cycle argument in that function.
+      rewardCycle: rewardCycleNextBlock,
+      // For `stack-stx`, this refers to `lock-period`. For `stack-extend`,
+      // this refers to `extend-count`. For `stack-aggregation-commit`, this is
+      // `u1`.
+      period: period,
+      // A string representing the function where this authorization is valid.
+      // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`.
+      topic: Pox4SignatureTopic.StackIncrease,
+      // The PoX address that can be used with this signer key.
+      poxAddress: this.wallet.btcAddress,
+      // The unique auth-id for this authorization.
+      authId: this.authId,
+      // The maximum amount of uSTX that can be used (per tx) with this signer
+      // key.
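+      // Here that is the stacker's current lock plus the attempted increase.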
+ maxAmount: maxAmount, + }); + + const stackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackIncrease.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(this.wallet.stxAddress), + "total-locked": Cl.uint(stacker.amountLocked + this.increaseBy), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet's locked amount and unlocked amount was extended. + wallet.amountLocked += this.increaseBy; + wallet.amountUnlocked -= this.increaseBy; + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-increase-sig", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase sig increase-by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts new file mode 100644 index 00000000000..4d0297b624c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts @@ -0,0 +1,143 @@ +import { Pox4SignatureTopic } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + Cl, + ClarityType, + ClarityValue, + cvToJSON, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +type CheckFunc = ( + this: StackIncreaseSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseSigCommand_Err` to increase the locked uSTX amount. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. 
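+   *
+   * Sketch only (the names below are hypothetical and the error code is
+   * illustrative rather than a value taken from the contract):
+   *
+   * ```ts
+   * new StackIncreaseSigCommand_Err(
+   *   wallet,
+   *   tooLargeIncrease,
+   *   authId,
+   *   function (model) { return true; }, // run unconditionally in this sketch
+   *   expectedErrorCode,
+   * );
+   * ```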
+ */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + const { result: rewardCycleNextBlockCV } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycleNextBlockCV, ClarityType.UInt)); + + const rewardCycleNextBlock = cvToValue(rewardCycleNextBlockCV); + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: rewardCycleNextBlock, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + const stackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-increase-sig", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. 
+ // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase sig increase-by ${this.increaseBy}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts new file mode 100644 index 00000000000..de3bc96964f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -0,0 +1,211 @@ +import { + isDelegating, + isStacking, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +/** + * The `StackStxAuthCommand` locks STX for stacking within PoX-4. This self-service + * operation allows the `tx-sender` (the `wallet` in this case) to participate + * as a Stacker. + * + * This command calls `stack-stx` using an `authorization`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be engaged in another stacking operation. + * - A minimum threshold of uSTX must be met, determined by the + * `get-stacking-minimum` function at the time of this call. + * - The amount of uSTX locked may need to be increased in future reward cycles + * if the minimum threshold rises. + */ +export class StackStxAuthCommand implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + /** + * Constructs a `StackStxAuthCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + } + + check(model: Readonly): boolean { + // Constraints for running this command include: + // - A minimum threshold of uSTX must be met, determined by the + // `get-stacking-minimum` function at the time of this call. + // - The Stacker cannot currently be engaged in another stacking operation. + // - The Stacker cannot currently be delegating STX to a delegatee. + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + isStackingMinimumCalculated(model) && + !isStacking(stacker) && + !isDelegating(stacker) + ); + } + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. 
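+    // Illustrative arithmetic (made-up numbers): with a stacking minimum of
+    // 125_000_000_000 uSTX and margin = 3, maxAmount and the locked amount
+    // below would be 375_000_000_000 uSTX.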
+ const maxAmount = model.stackingMinimum * this.margin; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Include the authorization and the `stack-stx` transactions in a single + // block. This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight + 1), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], this.wallet.stxAddress), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + expect(block[1].result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amountUstx), + "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey), + "stacker": Cl.principal(this.wallet.stxAddress), + "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)), + }), + ); + + // Get the wallet from the model and update it with the new state. + const wallet = model.stackers.get(this.wallet.stxAddress)!; + // Update model so that we know this wallet is stacking. This is important + // in order to prevent the test from stacking multiple times with the same + // address. + wallet.isStacking = true; + wallet.isStackingSolo = true; + // Update locked, unlocked, and unlock-height fields in the model. + wallet.amountLocked = amountUstx; + wallet.unlockHeight = Number(unlockBurnHeight.value); + wallet.amountUnlocked -= amountUstx; + wallet.firstLockedRewardCycle = Number(rewardCycle.value) + 1; + + model.nextRewardSetIndex++; + + // Log to console for debugging purposes. 
This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✓ ${this.wallet.label}`, + "stack-stx-auth", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx auth auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts new file mode 100644 index 00000000000..37f32a54588 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -0,0 +1,147 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, ClarityValue, cvToValue } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackStxAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackStxAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackStxAuthCommand_Err` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + const amountUstx = maxAmount; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + + // Include the authorization and the `stack-stx` transactions in a single + // block. 
This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], this.wallet.stxAddress), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `â‚¿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-stx-auth", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx auth auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts new file mode 100644 index 00000000000..d397297037b --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts @@ -0,0 +1,209 @@ +import { + isDelegating, + isStacking, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +/** + * The `StackStxSigCommand` locks STX for stacking within PoX-4. This self-service + * operation allows the `tx-sender` (the `wallet` in this case) to participate + * as a Stacker. + * + * This command calls stack-stx using a `signature`. + * + * Constraints for running this command include: + * - The Stacker cannot currently be engaged in another stacking operation. 
+ * - A minimum threshold of uSTX must be met, determined by the
+ *   `get-stacking-minimum` function at the time of this call.
+ * - The amount of uSTX locked may need to be increased in future reward cycles
+ *   if the minimum threshold rises.
+ */
+export class StackStxSigCommand implements PoxCommand {
+  readonly wallet: Wallet;
+  readonly authId: number;
+  readonly period: number;
+  readonly margin: number;
+
+  /**
+   * Constructs a `StackStxSigCommand` to lock uSTX for stacking.
+   *
+   * @param wallet - Represents the Stacker's wallet.
+   * @param authId - Unique auth-id for the authorization.
+   * @param period - Number of reward cycles to lock uSTX.
+   * @param margin - Multiplier for minimum required uSTX to stack so that each
+   *                 Stacker locks a different amount of uSTX across test runs.
+   */
+  constructor(
+    wallet: Wallet,
+    authId: number,
+    period: number,
+    margin: number,
+  ) {
+    this.wallet = wallet;
+    this.authId = authId;
+    this.period = period;
+    this.margin = margin;
+  }
+
+  check(model: Readonly<Stub>): boolean {
+    // Constraints for running this command include:
+    // - A minimum threshold of uSTX must be met, determined by the
+    //   `get-stacking-minimum` function at the time of this call.
+    // - The Stacker cannot currently be engaged in another stacking operation.
+    // - The Stacker cannot currently be delegating STX to a delegatee.
+
+    const stacker = model.stackers.get(this.wallet.stxAddress)!;
+    return (
+      isStackingMinimumCalculated(model) &&
+      !isStacking(stacker) &&
+      !isDelegating(stacker)
+    );
+  }
+
+  run(model: Stub, real: Real): void {
+    model.trackCommandRun(this.constructor.name);
+    const burnBlockHeightCV = real.network.runSnippet("burn-block-height");
+    const burnBlockHeight = Number(
+      cvToValue(burnBlockHeightCV as ClarityValue),
+    );
+    const currentRewCycle = currentCycle(real.network);
+
+    // The maximum amount of uSTX that can be used (per tx) with this signer
+    // key. For our tests, we will use the minimum amount of uSTX to be stacked
+    // in the given reward cycle multiplied by the margin, which is a randomly
+    // generated number passed to the constructor of this class.
+    const maxAmount = model.stackingMinimum * this.margin;
+
+    const signerSig = this.wallet.stackingClient.signPoxSignature({
+      // The signer key being authorized.
+      signerPrivateKey: this.wallet.signerPrvKey,
+      // The reward cycle for which the authorization is valid.
+      // For `stack-stx` and `stack-extend`, this refers to the reward cycle
+      // where the transaction is confirmed. For `stack-aggregation-commit`,
+      // this refers to the reward cycle argument in that function.
+      rewardCycle: currentRewCycle,
+      // For `stack-stx`, this refers to `lock-period`. For `stack-extend`,
+      // this refers to `extend-count`. For `stack-aggregation-commit`, this is
+      // `u1`.
+      period: this.period,
+      // A string representing the function where this authorization is valid.
+      // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`.
+      topic: Pox4SignatureTopic.StackStx,
+      // The PoX address that can be used with this signer key.
+      poxAddress: this.wallet.btcAddress,
+      // The unique auth-id for this authorization.
+      authId: this.authId,
+      // The maximum amount of uSTX that can be used (per tx) with this signer
+      // key.
+      maxAmount: maxAmount,
+    });
+
+    // The amount of uSTX to be locked in the reward cycle. For this test, we
+    // will use the maximum amount of uSTX that can be used (per tx) with this
+    // signer key.
+    const amountUstx = maxAmount;
+
+    // Act
+    const stackStx = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stack-stx",
+      [
+        // (amount-ustx uint)
+        Cl.uint(amountUstx),
+        // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+        poxAddressToTuple(this.wallet.btcAddress),
+        // (start-burn-ht uint)
+        Cl.uint(burnBlockHeight + 1),
+        // (lock-period uint)
+        Cl.uint(this.period),
+        // (signer-sig (optional (buff 65)))
+        Cl.some(Cl.bufferFromHex(signerSig)),
+        // (signer-key (buff 33))
+        Cl.bufferFromHex(this.wallet.signerPubKey),
+        // (max-amount uint)
+        Cl.uint(maxAmount),
+        // (auth-id uint)
+        Cl.uint(this.authId),
+      ],
+      this.wallet.stxAddress,
+    );
+
+    const { result: rewardCycle } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "burn-height-to-reward-cycle",
+      [Cl.uint(burnBlockHeight)],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(rewardCycle, ClarityType.UInt));
+
+    const { result: unlockBurnHeight } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "reward-cycle-to-burn-height",
+      [Cl.uint(Number(rewardCycle.value) + this.period + 1)],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(unlockBurnHeight, ClarityType.UInt));
+
+    // Assert
+    expect(stackStx.result).toBeOk(
+      Cl.tuple({
+        "lock-amount": Cl.uint(amountUstx),
+        "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey),
+        "stacker": Cl.principal(this.wallet.stxAddress),
+        "unlock-burn-height": Cl.uint(Number(unlockBurnHeight.value)),
+      }),
+    );
+
+    // Get the wallet from the model and update it with the new state.
+    const wallet = model.stackers.get(this.wallet.stxAddress)!;
+    // Update model so that we know this wallet is stacking. This is important
+    // in order to prevent the test from stacking multiple times with the same
+    // address.
+    wallet.isStacking = true;
+    wallet.isStackingSolo = true;
+    // Update locked, unlocked, and unlock-height fields in the model.
+    wallet.amountLocked = amountUstx;
+    wallet.unlockHeight = Number(unlockBurnHeight.value);
+    wallet.amountUnlocked -= amountUstx;
+    wallet.firstLockedRewardCycle = Number(rewardCycle.value) + 1;
+
+    model.nextRewardSetIndex++;
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✓ ${this.wallet.label}`,
+      "stack-stx-sig",
+      "lock-amount",
+      amountUstx.toString(),
+      "period",
+      this.period.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} stack-stx sig auth-id ${this.authId} and period ${this.period}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts
new file mode 100644
index 00000000000..919fa56c76a
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts
@@ -0,0 +1,169 @@
+import {
+  logCommand,
+  PoxCommand,
+  Real,
+  Stub,
+  Wallet,
+} from "./pox_CommandModel.ts";
+import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking";
+import { assert, expect } from "vitest";
+import {
+  Cl,
+  ClarityType,
+  ClarityValue,
+  cvToValue,
+  isClarityType,
+} from "@stacks/transactions";
+import { currentCycle } from "./pox_Commands.ts";
+
+type CheckFunc = (
+  this: StackStxSigCommand_Err,
+  model: Readonly<Stub>,
+) => boolean;
+
+export class StackStxSigCommand_Err implements PoxCommand {
+  readonly wallet: Wallet;
+  readonly authId: number;
+  readonly period: number;
+  readonly margin: number;
+  readonly checkFunc: CheckFunc;
+  readonly errorCode: number;
+
+  /**
+   * Constructs a `StackStxSigCommand_Err` to lock uSTX for stacking.
+   *
+   * @param wallet - Represents the Stacker's wallet.
+   * @param authId - Unique auth-id for the authorization.
+   * @param period - Number of reward cycles to lock uSTX.
+   * @param margin - Multiplier for minimum required uSTX to stack so that each
+   *                 Stacker locks a different amount of uSTX across test runs.
+   * @param checkFunc - A function to check constraints for running this command.
+   * @param errorCode - The expected error code when running this command.
+   */
+  constructor(
+    wallet: Wallet,
+    authId: number,
+    period: number,
+    margin: number,
+    checkFunc: CheckFunc,
+    errorCode: number,
+  ) {
+    this.wallet = wallet;
+    this.authId = authId;
+    this.period = period;
+    this.margin = margin;
+    this.checkFunc = checkFunc;
+    this.errorCode = errorCode;
+  }
+
+  check = (model: Readonly<Stub>): boolean => this.checkFunc.call(this, model);
+
+  run(model: Stub, real: Real): void {
+    const burnBlockHeightCV = real.network.runSnippet("burn-block-height");
+    const burnBlockHeight = Number(
+      cvToValue(burnBlockHeightCV as ClarityValue),
+    );
+    const currentRewCycle = currentCycle(real.network);
+
+    // The maximum amount of uSTX that can be used (per tx) with this signer
+    // key. For our tests, we will use the minimum amount of uSTX to be stacked
+    // in the given reward cycle multiplied by the margin, which is a randomly
+    // generated number passed to the constructor of this class.
+    const maxAmount = model.stackingMinimum * this.margin;
+
+    const signerSig = this.wallet.stackingClient.signPoxSignature({
+      // The signer key being authorized.
+      signerPrivateKey: this.wallet.signerPrvKey,
+      // The reward cycle for which the authorization is valid.
+      // For `stack-stx` and `stack-extend`, this refers to the reward cycle
+      // where the transaction is confirmed. For `stack-aggregation-commit`,
+      // this refers to the reward cycle argument in that function.
+      rewardCycle: currentRewCycle,
+      // For `stack-stx`, this refers to `lock-period`. For `stack-extend`,
+      // this refers to `extend-count`. For `stack-aggregation-commit`, this is
+      // `u1`.
+      period: this.period,
+      // A string representing the function where this authorization is valid.
+      // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`.
+      topic: Pox4SignatureTopic.StackStx,
+      // The PoX address that can be used with this signer key.
+      poxAddress: this.wallet.btcAddress,
+      // The unique auth-id for this authorization.
+      authId: this.authId,
+      // The maximum amount of uSTX that can be used (per tx) with this signer
+      // key.
+      maxAmount: maxAmount,
+    });
+
+    // The amount of uSTX to be locked in the reward cycle. For this test, we
+    // will use the maximum amount of uSTX that can be used (per tx) with this
+    // signer key.
+    const amountUstx = maxAmount;
+
+    // Act
+    const stackStx = real.network.callPublicFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "stack-stx",
+      [
+        // (amount-ustx uint)
+        Cl.uint(amountUstx),
+        // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+        poxAddressToTuple(this.wallet.btcAddress),
+        // (start-burn-ht uint)
+        Cl.uint(burnBlockHeight + 1),
+        // (lock-period uint)
+        Cl.uint(this.period),
+        // (signer-sig (optional (buff 65)))
+        Cl.some(Cl.bufferFromHex(signerSig)),
+        // (signer-key (buff 33))
+        Cl.bufferFromHex(this.wallet.signerPubKey),
+        // (max-amount uint)
+        Cl.uint(maxAmount),
+        // (auth-id uint)
+        Cl.uint(this.authId),
+      ],
+      this.wallet.stxAddress,
+    );
+
+    const { result: rewardCycle } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "burn-height-to-reward-cycle",
+      [Cl.uint(burnBlockHeight)],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(rewardCycle, ClarityType.UInt));
+
+    const { result: unlockBurnHeight } = real.network.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "reward-cycle-to-burn-height",
+      [Cl.uint(Number(rewardCycle.value) + this.period + 1)],
+      this.wallet.stxAddress,
+    );
+    assert(isClarityType(unlockBurnHeight, ClarityType.UInt));
+
+    // Assert
+    expect(stackStx.result).toBeErr(Cl.int(this.errorCode));
+
+    // Log to console for debugging purposes. This is not necessary for the
+    // test to pass but it is useful for debugging and eyeballing the test.
+    logCommand(
+      `₿ ${model.burnBlockHeight}`,
+      `✗ ${this.wallet.label}`,
+      "stack-stx-sig",
+      "lock-amount",
+      amountUstx.toString(),
+      "period",
+      this.period.toString(),
+    );
+
+    // Refresh the model's state if the network gets to the next reward cycle.
+    model.refreshStateForNextRewardCycle(real);
+  }
+
+  toString() {
+    // fast-check will call toString() in case of errors, e.g. property failed.
+    // It will then make a minimal counterexample, a process called 'shrinking'
+    // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642
+    return `${this.wallet.label} stack-stx sig auth-id ${this.authId} and period ${this.period}`;
+  }
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/tsconfig.json b/contrib/boot-contracts-stateful-prop-tests/tsconfig.json
new file mode 100644
index 00000000000..aa218f6d429
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/tsconfig.json
@@ -0,0 +1,25 @@
+{
+  "compilerOptions": {
+    "target": "ESNext",
+    "useDefineForClassFields": true,
+    "module": "ESNext",
+    "lib": ["ESNext"],
+    "skipLibCheck": true,
+
+    "moduleResolution": "bundler",
+    "allowImportingTsExtensions": true,
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "noEmit": true,
+
+    "strict": true,
+    "noImplicitAny": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noFallthroughCasesInSwitch": true
+  },
+  "include": [
+    "node_modules/@hirosystems/clarinet-sdk/vitest-helpers/src",
+    "tests"
+  ]
+}
diff --git a/contrib/boot-contracts-stateful-prop-tests/vitest.config.js b/contrib/boot-contracts-stateful-prop-tests/vitest.config.js
new file mode 100644
index 00000000000..364c55f7351
--- /dev/null
+++ b/contrib/boot-contracts-stateful-prop-tests/vitest.config.js
@@ -0,0 +1,43 @@
+/// <reference types="vitest" />
+
+import { defineConfig } from "vite";
+import {
+  vitestSetupFilePath,
+  getClarinetVitestsArgv,
+} from "@hirosystems/clarinet-sdk/vitest";
+
+/*
+  In this file, Vitest is configured so that it works seamlessly with Clarinet and the Simnet.
+
+  The `vitest-environment-clarinet` will initialise the clarinet-sdk
+  and make the `simnet` object available globally in the test files.
+
+  `vitestSetupFilePath` points to a file in the `@hirosystems/clarinet-sdk` package that does two things:
+    - run `before` hooks to initialize the simnet and `after` hooks to collect costs and coverage reports.
+    - load custom vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`)
+
+  The `getClarinetVitestsArgv()` will parse options passed to the command `vitest run --`
+    - vitest run -- --manifest ./Clarinet.toml # pass a custom path
+    - vitest run -- --coverage --costs # collect coverage and cost reports
+*/
+
+export default defineConfig({
+  test: {
+    environment: "clarinet", // use vitest-environment-clarinet
+    pool: "forks",
+    poolOptions: {
+      threads: { singleThread: true },
+      forks: { singleFork: true },
+    },
+    setupFiles: [
+      vitestSetupFilePath,
+      // custom setup files can be added here
+    ],
+    environmentOptions: {
+      clarinet: {
+        ...getClarinetVitestsArgv(),
+        // add or override options
+      },
+    },
+  },
+});
diff --git a/contrib/boot-contracts-unit-tests/.gitignore b/contrib/boot-contracts-unit-tests/.gitignore
new file mode 100644
index 00000000000..76c2842b123
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/.gitignore
@@ -0,0 +1,13 @@
+
+**/settings/Mainnet.toml
+**/settings/Testnet.toml
+.cache/**
+history.txt
+
+logs
+*.log
+npm-debug.log*
+coverage
+*.info
+costs-reports.json
+node_modules
diff --git a/contrib/boot-contracts-unit-tests/.vscode/settings.json b/contrib/boot-contracts-unit-tests/.vscode/settings.json
new file mode 100644
index 00000000000..306251957d2
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/.vscode/settings.json
@@ -0,0 +1,4 @@
+
+{
+  "files.eol": "\n"
+}
diff --git a/contrib/boot-contracts-unit-tests/.vscode/tasks.json b/contrib/boot-contracts-unit-tests/.vscode/tasks.json
new file mode 100644
index 00000000000..4dec0ffa984
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/.vscode/tasks.json
@@ -0,0 +1,19 @@
+
+{
+  "version": "2.0.0",
+  "tasks": [
+    {
+      "label": "check contracts",
+      "group": "test",
+      "type": "shell",
+      "command": "clarinet check"
+    },
+    {
+      "type": "npm",
+      "script": "test",
+      "group": "test",
+      "problemMatcher": [],
+      "label": "npm test"
+    }
+  ]
+}
diff --git a/contrib/boot-contracts-unit-tests/Clarinet.toml b/contrib/boot-contracts-unit-tests/Clarinet.toml
new file mode 100644
index 00000000000..00907244f8c
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/Clarinet.toml
@@ -0,0 +1,21 @@
+[project]
+name = 'boot-contracts-unit-tests'
+description = ''
+authors = []
+telemetry = false
+cache_dir = './.cache'
+requirements = []
+
+[contracts.indirect]
+path = 'contracts/indirect.clar'
+clarity_version = 2
+epoch = 2.4
+
+[repl.analysis]
+passes = ['check_checker']
+
+[repl.analysis.check_checker]
+strict = false
+trusted_sender = false
+trusted_caller = false
+callee_filter = false
diff --git a/contrib/boot-contracts-unit-tests/README.md b/contrib/boot-contracts-unit-tests/README.md
new file mode 100644
index 00000000000..a6e6eef8f1a
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/README.md
@@ -0,0 +1,24 @@
+# Boot contracts unit tests
+
+Run unit tests on the boot contracts with Clarinet.
+
+Contracts tested:
+
+- [x] pox-4.clar
+
+
+## About boot contract unit testing with Clarinet
+
+- To really test contracts such as the pox contracts, we need to test the boot contracts embedded
+into Clarinet. For example `ST000000000000000000002AMW42H.pox-4.clar`
+- This means that calling this contract interacts with the actual boot contract state embedded in
+Clarinet.
+- Since the boot contracts are embedded into Clarinet, we only test the version of the contract
+that is in Clarinet, and not the ones that actually live in the stacks-core repository.
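+
+For example, a minimal test (hypothetical; it assumes the default simnet accounts provided by
+`vitest-environment-clarinet`) could call a read-only function on the embedded boot contract
+directly via its boot address:
+
+```ts
+import { Cl } from "@stacks/transactions";
+import { describe, expect, it } from "vitest";
+
+describe("pox-4", () => {
+  it("accepts a lock period between 1 and 12 cycles", () => {
+    // `simnet` is injected globally by vitest-environment-clarinet.
+    // Depending on the default epoch, `simnet.setEpoch("3.0")` may be
+    // needed first so that pox-4 is deployed.
+    const wallet = simnet.getAccounts().get("wallet_1")!;
+    const { result } = simnet.callReadOnlyFn(
+      "ST000000000000000000002AMW42H.pox-4",
+      "check-pox-lock-period",
+      [Cl.uint(6)], // 1 <= 6 <= 12, so this is a valid lock period
+      wallet,
+    );
+    expect(result).toBeBool(true);
+  });
+});
+```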
+
+We are able to get the boot contracts coverage thanks to these settings in `vitest.config.js`:
+```js
+  includeBootContracts: true,
+  bootContractsPath: `${process.cwd()}/boot_contracts`,
+```
+A copy of the tested boot contracts is included in this directory as well so that we are able to
+compute and render the code coverage.
diff --git a/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar b/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar
new file mode 100644
index 00000000000..9824a719314
--- /dev/null
+++ b/contrib/boot-contracts-unit-tests/boot_contracts/pox-4.clar
@@ -0,0 +1,1484 @@
+;; The .pox-4 contract
+;; Error codes
+(define-constant ERR_STACKING_UNREACHABLE 255)
+(define-constant ERR_STACKING_CORRUPTED_STATE 254)
+(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1)
+(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2)
+(define-constant ERR_STACKING_ALREADY_STACKED 3)
+(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4)
+(define-constant ERR_STACKING_EXPIRED 5)
+(define-constant ERR_STACKING_STX_LOCKED 6)
+(define-constant ERR_STACKING_PERMISSION_DENIED 9)
+(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11)
+(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12)
+(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13)
+
+(define-constant ERR_STACKING_INVALID_AMOUNT 18)
+(define-constant ERR_NOT_ALLOWED 19)
+(define-constant ERR_STACKING_ALREADY_DELEGATED 20)
+(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21)
+(define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22)
+(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23)
+(define-constant ERR_INVALID_START_BURN_HEIGHT 24)
+(define-constant ERR_NOT_CURRENT_STACKER 25)
+(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26)
+(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27)
+(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28)
+(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29)
+(define-constant ERR_STACKING_IS_DELEGATED 30)
+(define-constant ERR_STACKING_NOT_DELEGATED 31)
+(define-constant ERR_INVALID_SIGNER_KEY 32)
+(define-constant ERR_REUSED_SIGNER_KEY 33)
+(define-constant ERR_DELEGATION_ALREADY_REVOKED 34)
+(define-constant ERR_INVALID_SIGNATURE_PUBKEY 35)
+(define-constant ERR_INVALID_SIGNATURE_RECOVER 36)
+(define-constant ERR_INVALID_REWARD_CYCLE 37)
+(define-constant ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH 38)
+(define-constant ERR_SIGNER_AUTH_USED 39)
+(define-constant ERR_INVALID_INCREASE 40)
+
+;; Valid values for burnchain address versions.
+;; These first four correspond to address hash modes in Stacks 2.1,
+;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they
+;; cannot be defined here again).
+(define-constant ADDRESS_VERSION_P2PKH 0x00) +(define-constant ADDRESS_VERSION_P2SH 0x01) +(define-constant ADDRESS_VERSION_P2WPKH 0x02) +(define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) + +;; Values for stacks address versions +(define-constant STACKS_ADDR_VERSION_MAINNET 0x16) +(define-constant STACKS_ADDR_VERSION_TESTNET 0x1a) + +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; PoX mainnet constants +;; Min/max number of reward cycles uSTX can be locked for +(define-constant MIN_POX_REWARD_CYCLES u1) +(define-constant MAX_POX_REWARD_CYCLES u12) + +;; Default length of the PoX registration window, in burnchain blocks. +(define-constant PREPARE_CYCLE_LENGTH (if is-in-mainnet u100 u50)) + +;; Default length of the PoX reward cycle, in burnchain blocks. +(define-constant REWARD_CYCLE_LENGTH (if is-in-mainnet u2100 u1050)) + +;; Stacking thresholds +(define-constant STACKING_THRESHOLD_25 (if is-in-mainnet u20000 u8000)) + +;; SIP18 message prefix +(define-constant SIP018_MSG_PREFIX 0x534950303138) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. +(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-pox-4-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (begin-pox-4-reward-cycle uint)) + (begin + (asserts! (not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set first-pox-4-reward-cycle begin-pox-4-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-4` contract tries to keep this state in sync with the reward-cycle +;; state. 
The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal), + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? + delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. +(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal), + signer: (buff 33) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? +;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. 
+;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). +(define-map logged-partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; State for setting authorizations for signer keys to be used in +;; certain stacking transactions. These fields match the fields used +;; in the message hash for signature-based signer key authorizations. +;; Values in this map are set in `set-signer-key-authorization`. +(define-map signer-key-authorizations + { + ;; The signer key being authorized + signer-key: (buff 33), + ;; The reward cycle for which the authorization is valid. + ;; For `stack-stx` and `stack-extend`, this refers to the reward + ;; cycle where the transaction is confirmed. For `stack-aggregation-commit`, + ;; this refers to the reward cycle argument in that function. + reward-cycle: uint, + ;; For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + ;; this refers to `extend-count`. For `stack-aggregation-commit`, this is `u1`. + period: uint, + ;; A string representing the function where this authorization is valid. Either + ;; `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: (string-ascii 14), + ;; The PoX address that can be used with this signer key + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; The unique auth-id for this authorization + auth-id: uint, + ;; The maximum amount of uSTX that can be used (per tx) with this signer key + max-amount: uint, + } + bool ;; Whether the authorization can be used or not +) + +;; State for tracking used signer key authorizations. This prevents re-use +;; of the same signature or pre-set authorization for multiple transactions. +;; Refer to the `signer-key-authorizations` map for the documentation on these fields +(define-map used-signer-key-authorizations + { + signer-key: (buff 33), + reward-cycle: uint, + period: uint, + topic: (string-ascii 14), + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + auth-id: uint, + max-amount: uint, + } + bool ;; Whether the field has been used or not +) + +;; What's the reward cycle number of the burnchain block height? +;; Will runtime-abort if height is less than the first burnchain block (this is intentional) +(define-read-only (burn-height-to-reward-cycle (height uint)) + (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length))) + +;; What's the block height at the start of a given reward cycle? +(define-read-only (reward-cycle-to-burn-height (cycle uint)) + (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length)))) + +;; What's the current PoX reward cycle? +(define-read-only (current-pox-reward-cycle) + (burn-height-to-reward-cycle burn-block-height)) + +;; Get the _current_ PoX stacking principal information. If the information +;; is expired, or if there's never been such a stacker, then returns none. +(define-read-only (get-stacker-info (stacker principal)) + (match (map-get? 
stacking-state { stacker: stacker })
+    stacking-info
+    (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle))
+        ;; present, but lock has expired
+        none
+        ;; present, and lock has not expired
+        (some stacking-info)
+    )
+    ;; no state at all
+    none
+  ))
+
+(define-read-only (check-caller-allowed)
+    (or (is-eq tx-sender contract-caller)
+        (let ((caller-allowed
+                ;; if not in the caller map, return false
+                (unwrap! (map-get? allowance-contract-callers
+                                   { sender: tx-sender, contract-caller: contract-caller })
+                         false))
+              (expires-at
+                ;; if until-burn-ht not set, then return true (because no expiry)
+                (unwrap! (get until-burn-ht caller-allowed) true)))
+          ;; is the caller allowance expired?
+          (if (>= burn-block-height expires-at)
+              false
+              true))))
+
+(define-read-only (get-check-delegation (stacker principal))
+    (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker }))))
+      ;; did the existing delegation expire?
+      (if (match (get until-burn-ht delegation-info)
+                 until-burn-ht (> burn-block-height until-burn-ht)
+                 false)
+          ;; it expired, return none
+          none
+          ;; delegation is active
+          (some delegation-info))))
+
+;; Get the size of the reward set for a reward cycle.
+;; Note that this also _will_ return PoX addresses that are beneath
+;; the minimum threshold -- i.e. the threshold can increase after insertion.
+;; Used internally by the Stacks node, which filters out the entries
+;; in this map to select PoX addresses with enough STX.
+(define-read-only (get-reward-set-size (reward-cycle uint))
+    (default-to
+        u0
+        (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }))))
+
+;; Add a single PoX address to a single reward cycle.
+;; Used to build up a set of per-reward-cycle PoX addresses.
+;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle!
+;; Returns the index into the reward cycle that the PoX address is stored to
+(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+                                              (reward-cycle uint)
+                                              (amount-ustx uint)
+                                              (stacker (optional principal))
+                                              (signer (buff 33)))
+    (let ((sz (get-reward-set-size reward-cycle)))
+        (map-set reward-cycle-pox-address-list
+            { reward-cycle: reward-cycle, index: sz }
+            { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker, signer: signer })
+        (map-set reward-cycle-pox-address-list-len
+            { reward-cycle: reward-cycle }
+            { len: (+ u1 sz) })
+        sz))
+
+;; How many uSTX are stacked?
+(define-read-only (get-total-ustx-stacked (reward-cycle uint))
+    (default-to
+        u0
+        (get total-ustx (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle })))
+)
+
+;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle.
+;; Returns (optional (tuple (pox-addr <pox-address>) (total-ustx <uint>)))
+(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint))
+    (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index }))
+
+;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive).
+;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments.
+;; Used by add-pox-addr-to-reward-cycles.
+;; No checking is done.
+;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if
+;; the pox-addr was added to the given cycle.
Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. +(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-set-indexes (list 12 uint)) + (first-reward-cycle uint) + (num-cycles uint) + (stacker (optional principal)) + (signer (buff 33)) + (amount-ustx uint) + (i uint)))) + (let ((reward-cycle (+ (get first-reward-cycle params) (get i params))) + (num-cycles (get num-cycles params)) + (i (get i params)) + (reward-set-index (if (< i num-cycles) + (let ((total-ustx (get-total-ustx-stacked reward-cycle)) + (reward-index + ;; record how many uSTX this pox-addr will stack for in the given reward cycle + (append-reward-cycle-pox-addr + (get pox-addr params) + reward-cycle + (get amount-ustx params) + (get stacker params) + (get signer params) + ))) + ;; update running total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: (+ (get amount-ustx params) total-ustx) }) + (some reward-index)) + none)) + (next-i (if (< i num-cycles) (+ i u1) i))) + { + pox-addr: (get pox-addr params), + first-reward-cycle: (get first-reward-cycle params), + num-cycles: num-cycles, + amount-ustx: (get amount-ustx params), + stacker: (get stacker params), + signer: (get signer params), + reward-set-indexes: (match + reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12)) + (get reward-set-indexes params)), + i: next-i + })) + +;; Add a PoX address to a given sequence of reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint) + (stacker principal) + (signer (buff 33))) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)) + (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes + { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles, + reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker), signer: signer })) + (reward-set-indexes (get reward-set-indexes results))) + ;; For safety, add up the number of times (add-principal-to-ith-reward-cycle) returns 1. + ;; It _should_ be equal to num-cycles. + (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE)) + (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE)) + (ok reward-set-indexes))) + +(define-private (add-pox-partial-stacked-to-ith-cycle + (cycle-index uint) + (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + num-cycles: uint, + amount-ustx: uint })) + (let ((pox-addr (get pox-addr params)) + (num-cycles (get num-cycles params)) + (reward-cycle (get reward-cycle params)) + (amount-ustx (get amount-ustx params))) + (let ((current-amount + (default-to u0 + (get stacked-amount + (map-get? 
partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }))))) + (if (>= cycle-index num-cycles) + ;; do not add to cycles >= cycle-index + false + ;; otherwise, add to the partial-stacked-by-cycle + (map-set partial-stacked-by-cycle + { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle } + { stacked-amount: (+ amount-ustx current-amount) })) + ;; produce the next params tuple + { pox-addr: pox-addr, + reward-cycle: (+ u1 reward-cycle), + num-cycles: num-cycles, + amount-ustx: amount-ustx }))) + +;; Add a PoX address to a given sequence of partial reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))) + (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes + { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx }) + true)) + +;; What is the minimum number of uSTX to be stacked in the given reward cycle? +;; Used internally by the Stacks node, and visible publicly. +(define-read-only (get-stacking-minimum) + (/ stx-liquid-supply STACKING_THRESHOLD_25)) + +;; Is the address mode valid for a PoX address? +(define-read-only (check-pox-addr-version (version (buff 1))) + (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION)) + +;; Is this buffer the right length for the given PoX address? +(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32))) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20) + (is-eq (len hashbytes) u20) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32) + (is-eq (len hashbytes) u32) + false))) + +;; Is the given lock period valid? +(define-read-only (check-pox-lock-period (lock-period uint)) + (and (>= lock-period MIN_POX_REWARD_CYCLES) + (<= lock-period MAX_POX_REWARD_CYCLES))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; minimum uSTX must be met + (asserts! (<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! 
(check-pox-addr-version (get version pox-addr))
+              (err ERR_STACKING_INVALID_POX_ADDRESS))
+
+      ;; address hashbytes must be valid for the version
+      (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr))
+                (err ERR_STACKING_INVALID_POX_ADDRESS))
+
+      (ok true)))
+
+;; Revoke contract-caller authorization to call stacking methods
+(define-public (disallow-contract-caller (caller principal))
+  (begin
+    (asserts! (is-eq tx-sender contract-caller)
+              (err ERR_STACKING_PERMISSION_DENIED))
+    (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller }))))
+
+;; Give a contract-caller authorization to call stacking methods
+;; normally, stacking methods may only be invoked by _direct_ transactions
+;; (i.e., the tx-sender issues a direct contract-call to the stacking methods)
+;; by issuing an allowance, the tx-sender may call through the allowed contract
+(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint)))
+  (begin
+    (asserts! (is-eq tx-sender contract-caller)
+              (err ERR_STACKING_PERMISSION_DENIED))
+    (ok (map-set allowance-contract-callers
+               { sender: tx-sender, contract-caller: caller }
+               { until-burn-ht: until-burn-ht }))))
+
+;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX).
+;; The STX will be locked for the given number of reward cycles (lock-period).
+;; This is the self-service interface. tx-sender will be the Stacker.
+;;
+;; * The given stacker cannot currently be stacking.
+;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum)
+;; at the time this method is called.
+;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold
+;; may increase between reward cycles.
+;; * You need to provide a signer key to be used in the signer DKG process.
+;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`.
+;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle,
+;; and in most cases should be set to the current burn block height.
+;;
+;; To ensure that the Stacker is authorized to use the provided `signer-key`, the stacker
+;; must provide either a signature or have an authorization already saved. Refer to
+;; `verify-signer-key-sig` for more information.
+;;
+;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically.
+(define-public (stack-stx (amount-ustx uint)
+                          (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
+                          (start-burn-ht uint)
+                          (lock-period uint)
+                          (signer-sig (optional (buff 65)))
+                          (signer-key (buff 33))
+                          (max-amount uint)
+                          (auth-id uint))
+    ;; this stacker's first reward cycle is the _next_ reward cycle
+    (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle)))
+          (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))))
+      ;; the start-burn-ht must result in the next reward cycle, do not allow stackers
+      ;; to "post-date" their `stack-stx` transaction
+      (asserts! (is-eq first-reward-cycle specified-reward-cycle)
+                (err ERR_INVALID_START_BURN_HEIGHT))
+
+      ;; must be called directly by the tx-sender or by an allowed contract-caller
+      (asserts! (check-caller-allowed)
+                (err ERR_STACKING_PERMISSION_DENIED))
+
+      ;; tx-sender principal must not be stacking
+      (asserts! (is-none (get-stacker-info tx-sender))
+        (err ERR_STACKING_ALREADY_STACKED))
+
+      ;; tx-sender must not be delegating
+      (asserts!
(is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; Validate ownership of the given signer key + (try! (consume-signer-key-authorization pox-addr (- first-reward-cycle u1) "stack-stx" lock-period signer-sig signer-key amount-ustx max-amount auth-id)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender signer-key)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: tx-sender, lock-amount: amount-ustx, signer-key: signer-key, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +;; Revokes the delegation to the current stacking pool. +;; New in pox-4: Fails if the delegation was already revoked. +;; Returns the last delegation state. +(define-public (revoke-delegate-stx) + (let ((last-delegation-state (get-check-delegation tx-sender))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (asserts! (is-some last-delegation-state) (err ERR_DELEGATION_ALREADY_REVOKED)) + (asserts! (map-delete delegation-state { stacker: tx-sender }) (err ERR_DELEGATION_ALREADY_REVOKED)) + (ok last-delegation-state))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) }))) + + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + (match pox-addr + pox-tuple + (asserts! (check-pox-addr-hashbytes (get version pox-tuple) (get hashbytes pox-tuple)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! 
(is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Generate a message hash for validating a signer key. +;; The message hash follows SIP018 for signing structured data. The structured data +;; is the tuple `{ pox-addr: { version, hashbytes }, reward-cycle, auth-id, max-amount }`. +;; The domain is `{ name: "pox-4-signer", version: "1.0.0", chain-id: chain-id }`. +(define-read-only (get-signer-key-message-hash (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (max-amount uint) + (auth-id uint)) + (sha256 (concat + SIP018_MSG_PREFIX + (concat + (sha256 (unwrap-panic (to-consensus-buff? { name: "pox-4-signer", version: "1.0.0", chain-id: chain-id }))) + (sha256 (unwrap-panic + (to-consensus-buff? { + pox-addr: pox-addr, + reward-cycle: reward-cycle, + topic: topic, + period: period, + auth-id: auth-id, + max-amount: max-amount, + }))))))) + +;; Verify a signature from the signing key for this specific stacker. +;; See `get-signer-key-message-hash` for details on the message hash. +;; +;; Note that `reward-cycle` corresponds to the _current_ reward cycle, +;; when used with `stack-stx` and `stack-extend`. Both the reward cycle and +;; the lock period are inflexible, which means that the stacker must confirm their transaction +;; during the exact reward cycle and with the exact period that the signature or authorization was +;; generated for. +;; +;; The `amount` field is checked to ensure it is not larger than `max-amount`, which is +;; a field in the authorization. `auth-id` is a random uint to prevent authorization +;; replays. +;; +;; This function does not verify the payload of the authorization. The caller of +;; this function must ensure that the payload (reward cycle, period, topic, and pox-addr) +;; are valid according to the caller function's requirements. +;; +;; When `signer-sig` is present, the public key is recovered from the signature +;; and compared to `signer-key`. If `signer-sig` is `none`, the function verifies that an authorization was previously +;; added for this key. +;; +;; This function checks to ensure that the authorization hasn't been used yet, but it +;; does _not_ store the authorization as used. The function `consume-signer-key-authorization` +;; handles that, and this read-only function is exposed for client-side verification. +(define-read-only (verify-signer-key-sig (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (signer-sig-opt (optional (buff 65))) + (signer-key (buff 33)) + (amount uint) + (max-amount uint) + (auth-id uint)) + (begin + ;; Validate that amount is less than or equal to `max-amount` + (asserts! (>= max-amount amount) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH)) + (asserts! (is-none (map-get? used-signer-key-authorizations { signer-key: signer-key, reward-cycle: reward-cycle, topic: topic, period: period, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount })) + (err ERR_SIGNER_AUTH_USED)) + (match signer-sig-opt + ;; `signer-sig` is present, verify the signature + signer-sig (ok (asserts! + (is-eq + (unwrap! (secp256k1-recover? 
+ (get-signer-key-message-hash pox-addr reward-cycle topic period max-amount auth-id) + signer-sig) (err ERR_INVALID_SIGNATURE_RECOVER)) + signer-key) + (err ERR_INVALID_SIGNATURE_PUBKEY))) + ;; `signer-sig` is not present, verify that an authorization was previously added for this key + (ok (asserts! (default-to false (map-get? signer-key-authorizations + { signer-key: signer-key, reward-cycle: reward-cycle, period: period, topic: topic, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount })) + (err ERR_NOT_ALLOWED))) + )) + ) + +;; This function does two things: +;; +;; - Verify that a signer key is authorized to be used +;; - Updates the `used-signer-key-authorizations` map to prevent reuse +;; +;; This "wrapper" method around `verify-signer-key-sig` allows that function to remain +;; read-only, so that it can be used by clients as a sanity check before submitting a transaction. +(define-private (consume-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (topic (string-ascii 14)) + (period uint) + (signer-sig-opt (optional (buff 65))) + (signer-key (buff 33)) + (amount uint) + (max-amount uint) + (auth-id uint)) + (begin + ;; verify the authorization + (try! (verify-signer-key-sig pox-addr reward-cycle topic period signer-sig-opt signer-key amount max-amount auth-id)) + ;; update the `used-signer-key-authorizations` map + (asserts! (map-insert used-signer-key-authorizations + { signer-key: signer-key, reward-cycle: reward-cycle, topic: topic, period: period, pox-addr: pox-addr, auth-id: auth-id, max-amount: max-amount } true) + (err ERR_SIGNER_AUTH_USED)) + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. +;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (consume-signer-key-authorization pox-addr reward-cycle "agg-commit" u1 signer-sig signer-key amount-ustx max-amount auth-id)) + (try! (can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. 
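+      ;; Note: `num-cycles` is u1 and `i` starts at u0 below, so exactly one
+      ;; index is appended to `reward-set-indexes`, which is why the
+      ;; `unwrap-panic` around `element-at ... u0` cannot fail here.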
+ (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + signer: signer-key, + amount-ustx: amount-ustx, + i: u0 })) + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. +;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. +;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! 
(> reward-cycle (current-pox-reward-cycle))
+              (err ERR_STACKING_INVALID_LOCK_PERIOD))
+
+    (let ((partial-amount-ustx (get stacked-amount partial-stacked))
+          ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list
+          (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index })
+                                   (err ERR_DELEGATION_NO_REWARD_SLOT)))
+          ;; reward-cycle must point to an existing record in reward-cycle-total-stacked
+          ;; infallible; getting existing-entry succeeded so this must succeed
+          (existing-cycle (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle })))
+          (increased-entry-total (+ (get total-ustx existing-entry) partial-amount-ustx))
+          (increased-cycle-total (+ (get total-ustx existing-cycle) partial-amount-ustx))
+          (existing-signer-key (get signer existing-entry)))
+
+      ;; must be stackable
+      (try! (minimal-can-stack-stx pox-addr increased-entry-total reward-cycle u1))
+
+      ;; new total must meet or exceed the stacking minimum
+      (asserts! (<= (get-stacking-minimum) increased-entry-total)
+                (err ERR_STACKING_THRESHOLD_NOT_MET))
+
+      ;; there must *not* be a stacker entry (since this is a delegator)
+      (asserts! (is-none (get stacker existing-entry))
+                (err ERR_DELEGATION_WRONG_REWARD_SLOT))
+
+      ;; the given PoX address must match the one on record
+      (asserts! (is-eq pox-addr (get pox-addr existing-entry))
+                (err ERR_DELEGATION_WRONG_REWARD_SLOT))
+
+      ;; Validate that amount is less than or equal to `max-amount`
+      (asserts! (>= max-amount increased-entry-total) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH))
+
+      ;; Validate that signer-key matches the existing signer-key
+      (asserts! (is-eq existing-signer-key signer-key) (err ERR_INVALID_SIGNER_KEY))
+
+      ;; Verify signature from delegate that allows this sender for this cycle
+      ;; 'lock-period' param set to one period, same as aggregation-commit-indexed
+      (try!
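+        ;; (note: the topic below is "agg-increase", distinct from "agg-commit" above,
+        ;; and the amount authorized is the *new* entry total, not just the partial
+        ;; amount being added)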
(consume-signer-key-authorization pox-addr reward-cycle "agg-increase" u1 signer-sig signer-key increased-entry-total max-amount auth-id))
+
+      ;; update the pox-address list -- bump the total-ustx
+      (map-set reward-cycle-pox-address-list
+               { reward-cycle: reward-cycle, index: reward-cycle-index }
+               { pox-addr: pox-addr,
+                 total-ustx: increased-entry-total,
+                 stacker: none,
+                 signer: signer-key })
+
+      ;; update the total ustx in this cycle
+      (map-set reward-cycle-total-stacked
+               { reward-cycle: reward-cycle }
+               { total-ustx: increased-cycle-total })
+
+      ;; don't update the stacking-state map,
+      ;; because it _already has_ this stacker's state
+      ;; don't lock the STX, because the STX is already locked
+      ;;
+      ;; clear the partial-stacked state, and log it
+      (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle })
+      (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked)
+      (ok true))))
+
+;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle
+;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit
+(define-public (delegate-stack-stx (stacker principal)
+                                   (amount-ustx uint)
+                                   (pox-addr { version: (buff 1), hashbytes: (buff 32) })
+                                   (start-burn-ht uint)
+                                   (lock-period uint))
+    ;; this stacker's first reward cycle is the _next_ reward cycle
+    (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle)))
+          (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))
+          (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period))))
+      ;; the start-burn-ht must result in the next reward cycle; do not allow stackers
+      ;; to "post-date" their `stack-stx` transaction
+      (asserts! (is-eq first-reward-cycle specified-reward-cycle)
+                (err ERR_INVALID_START_BURN_HEIGHT))
+
+      ;; must be called directly by the tx-sender or by an allowed contract-caller
+      (asserts! (check-caller-allowed)
+                (err ERR_STACKING_PERMISSION_DENIED))
+
+      ;; stacker must have delegated to the caller
+      (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))))
+        ;; must have delegated to tx-sender
+        (asserts! (is-eq (get delegated-to delegation-info) tx-sender)
+                  (err ERR_STACKING_PERMISSION_DENIED))
+        ;; must have delegated enough stx
+        (asserts! (>= (get amount-ustx delegation-info) amount-ustx)
+                  (err ERR_DELEGATION_TOO_MUCH_LOCKED))
+        ;; if pox-addr is set, must be equal to pox-addr
+        (asserts! (match (get pox-addr delegation-info)
+                         specified-pox-addr (is-eq pox-addr specified-pox-addr)
+                         true)
+                  (err ERR_DELEGATION_POX_ADDR_REQUIRED))
+        ;; delegation must not expire before lock period
+        (asserts! (match (get until-burn-ht delegation-info)
+                         until-burn-ht (>= until-burn-ht
+                                           unlock-burn-height)
+                         true)
+                  (err ERR_DELEGATION_EXPIRES_DURING_LOCK))
+      )
+
+      ;; stacker principal must not be stacking
+      (asserts! (is-none (get-stacker-info stacker))
+                (err ERR_STACKING_ALREADY_STACKED))
+
+      ;; the Stacker must have sufficient unlocked funds
+      (asserts! (>= (stx-get-balance stacker) amount-ustx)
+                (err ERR_STACKING_INSUFFICIENT_FUNDS))
+
+      ;; ensure that stacking can be performed
+      (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period))
+
+      ;; register the PoX address with the amount stacked via partial stacking
+      ;; before it can be included in the reward set, this must be committed!
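+      ;; Illustrative flow (hypothetical values): a delegate may call this twice,
+      ;;   (delegate-stack-stx 'SP...A u60000000000 pox-addr burn-ht u6)
+      ;;   (delegate-stack-stx 'SP...B u60000000000 pox-addr burn-ht u6)
+      ;; each call accumulates in `partial-stacked-by-cycle`; the combined
+      ;; u120000000000 only enters the reward set once the delegate calls
+      ;; `stack-aggregation-commit`.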
+      (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx)
+
+      ;; add stacker record
+      (map-set stacking-state
+        { stacker: stacker }
+        { pox-addr: pox-addr,
+          first-reward-cycle: first-reward-cycle,
+          reward-set-indexes: (list),
+          lock-period: lock-period,
+          delegated-to: (some tx-sender) })
+
+      ;; return the lock-up information, so the node can actually carry out the lock.
+      (ok { stacker: stacker,
+            lock-amount: amount-ustx,
+            unlock-burn-height: unlock-burn-height })))
+
+
+;; Used for PoX parameters discovery
+(define-read-only (get-pox-info)
+    (ok {
+        min-amount-ustx: (get-stacking-minimum),
+        reward-cycle-id: (current-pox-reward-cycle),
+        prepare-cycle-length: (var-get pox-prepare-cycle-length),
+        first-burnchain-block-height: (var-get first-burnchain-block-height),
+        reward-cycle-length: (var-get pox-reward-cycle-length),
+        total-liquid-supply-ustx: stx-liquid-supply,
+    })
+)
+
+;; Update the number of stacked STX in a given reward cycle entry.
+;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number.
+;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have
+;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle.
+(define-private (increase-reward-cycle-entry
+                  (reward-cycle-index uint)
+                  (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint, signer-key: (buff 33) })))
+    (let ((data (try! updates))
+          (first-cycle (get first-cycle data))
+          (reward-cycle (get reward-cycle data))
+          (passed-signer-key (get signer-key data)))
+      (if (> first-cycle reward-cycle)
+          ;; not at first cycle to process yet
+          (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data), signer-key: (get signer-key data) })
+          (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index })))
+                (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle })))
+                (existing-signer-key (get signer existing-entry))
+                (add-amount (get add-amount data))
+                (total-ustx (+ (get total-ustx existing-total) add-amount)))
+              ;; stacker must match
+              (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none)
+              ;; signer-key must match
+              (asserts! (is-eq existing-signer-key passed-signer-key) none)
+              ;; update the pox-address list
+              (map-set reward-cycle-pox-address-list
+                       { reward-cycle: reward-cycle, index: reward-cycle-index }
+                       { pox-addr: (get pox-addr existing-entry),
+                         ;; This addresses the bug in pox-2 (see SIP-022)
+                         total-ustx: (+ (get total-ustx existing-entry) add-amount),
+                         stacker: (some (get stacker data)),
+                         signer: (get signer existing-entry) })
+              ;; update the total
+              (map-set reward-cycle-total-stacked
+                       { reward-cycle: reward-cycle }
+                       { total-ustx: total-ustx })
+              (some { first-cycle: first-cycle,
+                      reward-cycle: (+ u1 reward-cycle),
+                      stacker: (get stacker data),
+                      add-amount: (get add-amount data),
+                      signer-key: passed-signer-key })))))
+
+;; Increase the number of STX locked.
+;; *New in Stacks 2.1*
+;; This method locks up an additional amount of `tx-sender`'s STX, indicated
+;; by `increase-by`. The `tx-sender` must already be Stacking & must not be
+;; straddling more than one signer-key for the cycles affected.
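+;; For example (hypothetical values): with u100000000000 uSTX already locked, a call
+;; with `increase-by` of u50000000000 must supply `max-amount` >= u150000000000,
+;; because the signer authorization is validated against the post-increase total.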
+;; Refer to `verify-signer-key-sig` for more information on the authorization parameters
+;; included here.
+(define-public (stack-increase
+            (increase-by uint)
+            (signer-sig (optional (buff 65)))
+            (signer-key (buff 33))
+            (max-amount uint)
+            (auth-id uint))
+   (let ((stacker-info (stx-account tx-sender))
+         (amount-stacked (get locked stacker-info))
+         (amount-unlocked (get unlocked stacker-info))
+         (unlock-height (get unlock-height stacker-info))
+         (cur-cycle (current-pox-reward-cycle))
+         (first-increased-cycle (+ cur-cycle u1))
+         (stacker-state (unwrap! (map-get? stacking-state
+                                          { stacker: tx-sender })
+                                  (err ERR_STACK_INCREASE_NOT_LOCKED)))
+         (cur-pox-addr (get pox-addr stacker-state))
+         (cur-period (get lock-period stacker-state)))
+      ;; tx-sender must be currently locked
+      (asserts! (> amount-stacked u0)
+                (err ERR_STACK_INCREASE_NOT_LOCKED))
+      ;; must be called with positive `increase-by`
+      (asserts! (>= increase-by u1)
+                (err ERR_STACKING_INVALID_AMOUNT))
+      ;; stacker must have enough stx to lock
+      (asserts! (>= amount-unlocked increase-by)
+                (err ERR_STACKING_INSUFFICIENT_FUNDS))
+      ;; must be called directly by the tx-sender or by an allowed contract-caller
+      (asserts! (check-caller-allowed)
+                (err ERR_STACKING_PERMISSION_DENIED))
+      ;; stacker must be directly stacking
+      (asserts! (> (len (get reward-set-indexes stacker-state)) u0)
+                (err ERR_STACKING_IS_DELEGATED))
+      ;; stacker must not be delegating
+      (asserts! (is-none (get delegated-to stacker-state))
+                (err ERR_STACKING_IS_DELEGATED))
+
+      ;; Validate that amount is less than or equal to `max-amount`
+      (asserts! (>= max-amount (+ increase-by amount-stacked)) (err ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH))
+
+      ;; Verify signature from delegate that allows this sender for this cycle
+      (try! (consume-signer-key-authorization cur-pox-addr cur-cycle "stack-increase" cur-period signer-sig signer-key increase-by max-amount auth-id))
+
+      ;; update reward cycle amounts
+      (asserts! (is-some (fold increase-reward-cycle-entry
+            (get reward-set-indexes stacker-state)
+            (some { first-cycle: first-increased-cycle,
+                    reward-cycle: (get first-reward-cycle stacker-state),
+                    stacker: tx-sender,
+                    add-amount: increase-by,
+                    signer-key: signer-key })))
+        (err ERR_INVALID_INCREASE))
+      ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-4
+      (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)})))
+
+;; Extend an active Stacking lock.
+;; *New in Stacks 2.1*
+;; This method extends the `tx-sender`'s current lockup for an additional `extend-count`
+;; and associates `pox-addr` with the rewards. The `signer-key` will be the key
+;; used for signing. The `tx-sender` can thus decide to change the key when extending.
+;;
+;; Because no additional STX are locked in this function, the `amount` field used
+;; to verify the signer key authorization is zero. Refer to `verify-signer-key-sig` for more information.
+(define-public (stack-extend (extend-count uint)
+                             (pox-addr { version: (buff 1), hashbytes: (buff 32) })
+                             (signer-sig (optional (buff 65)))
+                             (signer-key (buff 33))
+                             (max-amount uint)
+                             (auth-id uint))
+   (let ((stacker-info (stx-account tx-sender))
+         ;; to extend, there must already be an entry in the stacking-state
+         (stacker-state (unwrap!
(get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; Verify signature from delegate that allows this sender for this cycle + (try! (consume-signer-key-authorization pox-addr cur-cycle "stack-extend" extend-count signer-sig signer-key u0 max-amount auth-id)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender signer-key))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. 
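+;; Note that, unlike `stack-increase`, this function takes no signer-key arguments:
+;; the signer-key authorization is checked later, when the delegate commits the
+;; increased amount via `stack-aggregation-commit` or `stack-aggregation-increase`.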
+;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + (cycle-count (try! (if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. 
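+      ;; (the STX lock itself is applied by the stacks-node from this return
+      ;; value; the contract only records the stacking state)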
+ (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK)) + ) + + ;; delegate stacking does minimal-can-stack-stx + (try! 
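+      ;; `minimal-can-stack-stx` is used here (instead of `can-stack-stx`) because
+      ;; partially-stacked amounts need only clear the stacking minimum in aggregate,
+      ;; which is enforced at `stack-aggregation-commit`/`stack-aggregation-increase` time.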
(minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Add an authorization for a signer key. +;; When an authorization is added, the `signer-sig` argument is not required +;; in the functions that use it as an argument. +;; The `allowed` flag can be used to either enable or disable the authorization. +;; Only the Stacks principal associated with `signer-key` can call this function. +;; +;; Refer to the documentation for `verify-signer-key-sig` for more information +;; regarding the parameters used in an authorization. When the authorization is used +;; in `stack-stx` and `stack-extend`, the `reward-cycle` refers to the reward cycle +;; where the transaction is confirmed, **not** the reward cycle where stacking begins. +;; The `period` parameter must match the exact lock period (or extend count) used +;; in the stacking transaction. The `max-amount` parameter specifies the maximum amount +;; of STX that can be locked in an individual stacking transaction. `auth-id` is a +;; random uint to prevent replays. +;; +;; *New in Stacks 3.0* +(define-public (set-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32)}) + (period uint) + (reward-cycle uint) + (topic (string-ascii 14)) + (signer-key (buff 33)) + (allowed bool) + (max-amount uint) + (auth-id uint)) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_NOT_ALLOWED)) + ;; Validate that `tx-sender` has the same pubkey hash as `signer-key` + (asserts! (is-eq + (unwrap! (principal-construct? (if is-in-mainnet STACKS_ADDR_VERSION_MAINNET STACKS_ADDR_VERSION_TESTNET) (hash160 signer-key)) (err ERR_INVALID_SIGNER_KEY)) + tx-sender) (err ERR_NOT_ALLOWED)) + ;; Must be called with positive period + (asserts! (>= period u1) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; Must be current or future reward cycle + (asserts! (>= reward-cycle (current-pox-reward-cycle)) (err ERR_INVALID_REWARD_CYCLE)) + (map-set signer-key-authorizations { pox-addr: pox-addr, period: period, reward-cycle: reward-cycle, topic: topic, signer-key: signer-key, auth-id: auth-id, max-amount: max-amount } allowed) + (ok allowed))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. +;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? 
allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) diff --git a/contrib/boot-contracts-unit-tests/contracts/indirect.clar b/contrib/boot-contracts-unit-tests/contracts/indirect.clar new file mode 100644 index 00000000000..c9889b00d2a --- /dev/null +++ b/contrib/boot-contracts-unit-tests/contracts/indirect.clar @@ -0,0 +1,112 @@ +(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-stx amount-ustx pox-addr start-burn-ht lock-period signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-increase + (increase-by uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-increase increase-by signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-extend (extend-count uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-extend extend-count pox-addr signer-sig signer-key max-amount auth-id) +) + +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) }))) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stx amount-ustx delegate-to until-burn-ht pox-addr) +) + +(define-public (revoke-delegate-stx) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 revoke-delegate-stx) +) + +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 allow-contract-caller caller until-burn-ht) +) + +(define-public (disallow-contract-caller (caller principal)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 disallow-contract-caller caller) +) + +(define-read-only (check-caller-allowed) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 check-caller-allowed) +) + +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-stx stacker amount-ustx pox-addr start-burn-ht lock-period) +) + +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 
'ST000000000000000000002AMW42H.pox-4 stack-aggregation-commit-indexed pox-addr reward-cycle signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-aggregation-commit pox-addr reward-cycle signer-sig signer-key max-amount auth-id) +) + +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 stack-aggregation-increase pox-addr reward-cycle reward-cycle-index signer-sig signer-key max-amount auth-id) +) + +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-extend stacker pox-addr extend-count) +) + +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 delegate-stack-increase stacker pox-addr increase-by) +) + +(define-public (set-signer-key-authorization (pox-addr { version: (buff 1), hashbytes: (buff 32)}) + (period uint) + (reward-cycle uint) + (topic (string-ascii 14)) + (signer-key (buff 33)) + (allowed bool) + (max-amount uint) + (auth-id uint)) + (contract-call? 'ST000000000000000000002AMW42H.pox-4 set-signer-key-authorization pox-addr period reward-cycle topic signer-key allowed max-amount auth-id) +) diff --git a/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml b/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml new file mode 100644 index 00000000000..ab7335aae28 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/deployments/default.simnet-plan.yaml @@ -0,0 +1,57 @@ +--- +id: 0 +name: "Simulated deployment, used as a default for `clarinet console`, `clarinet test` and `clarinet check`" +network: simnet +genesis: + wallets: + - name: deployer + address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM + balance: "100000000000000" + - name: faucet + address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 + balance: "100000000000000" + - name: wallet_1 + address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 + balance: "100000000000000" + - name: wallet_2 + address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG + balance: "100000000000000" + - name: wallet_3 + address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC + balance: "100000000000000" + - name: wallet_4 + address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND + balance: "100000000000000" + - name: wallet_5 + address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB + balance: "100000000000000" + - name: wallet_6 + address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 + balance: "100000000000000" + - name: wallet_7 + address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ + balance: "100000000000000" + - name: wallet_8 + address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP + balance: "100000000000000" + contracts: + - costs + - pox + - pox-2 + - pox-3 + - pox-4 + - lockup + - costs-2 + - costs-3 + - cost-voting + - bns +plan: + batches: + - id: 0 + transactions: + - emulated-contract-publish: + contract-name: indirect + 
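+          # publishes the wrapper contract above, so the unit tests can drive
+          # pox-4 through an intermediate contract-caller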
emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM + path: contracts/indirect.clar + clarity-version: 2 + epoch: "2.4" diff --git a/contrib/boot-contracts-unit-tests/package-lock.json b/contrib/boot-contracts-unit-tests/package-lock.json new file mode 100644 index 00000000000..bee7c735ff5 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/package-lock.json @@ -0,0 +1,2359 @@ +{ + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "typescript": "^5.3.3", + "vite": "^5.1.4", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz", + "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz", + "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz", + "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz", + "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz", + "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz", + "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz", + "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==", + "cpu": [ + "arm64" + ], + "optional": 
true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz", + "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz", + "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz", + "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz", + "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz", + "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz", + "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz", + "integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz", + "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz", + "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + 
"version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz", + "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz", + "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz", + "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz", + "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz", + "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz", + "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz", + "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@hirosystems/clarinet-sdk": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-2.6.0.tgz", + "integrity": "sha512-8qyvpaeTmhn/Lrsg7zjNpIr9Ova1zVfzMNeBC4+y42tqxHX0j6MM58nr5m56bz5/0u+KPOvQpAhuVxGR27/NiA==", + "dependencies": { + "@hirosystems/clarinet-sdk-wasm": "^2.6.0", + "@stacks/encryption": "^6.13.0", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.13.0", + "@stacks/transactions": "^6.13.0", + "kolorist": "^1.8.0", + "prompts": "^2.4.2", + "vitest": "^1.0.4", + "yargs": "^17.7.2" + }, + "bin": { + "clarinet-sdk": "dist/cjs/bin/index.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@hirosystems/clarinet-sdk-wasm": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.6.0.tgz", + "integrity": 
"sha512-cUpYrnLX4VnpnumlYTCUNf1gFfl2kL18q63C1qFzUzkjFszffR+x0U2lxOQrz3EY3/U6eWeZvZPdKbOFO3zgqQ==" + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@noble/hashes": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.1.5.tgz", + "integrity": "sha512-LTMZiiLc+V4v1Yi16TD6aX2gmtKszNye0pQgbaLqkvhIqP7nVsSaJsWloGQjJfJ8offaoP5GtX3yY5swbcJxxQ==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@noble/secp256k1": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@noble/secp256k1/-/secp256k1-1.7.1.tgz", + "integrity": "sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.16.1.tgz", + "integrity": "sha512-92/y0TqNLRYOTXpm6Z7mnpvKAG9P7qmK7yJeRJSdzElNCUnsgbpAsGqerUboYRIQKzgfq4pWu9xVkgpWLfmNsw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.16.1.tgz", + "integrity": "sha512-ttWB6ZCfRLuDIUiE0yiu5gcqOsYjA5F7kEV1ggHMj20FwLZ8A1FMeahZJFl/pnOmcnD2QL0z4AcDuo27utGU8A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.16.1.tgz", + "integrity": "sha512-QLDvPLetbqjHojTGFw9+nuSP3YY/iz2k1cep6crYlr97sS+ZJ0W43b8Z0zC00+lnFZj6JSNxiA4DjboNQMuh1A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.16.1.tgz", + "integrity": "sha512-TAUK/D8khRrRIa1KwRzo8JNKk3tcqaeXWdtsiLgA8zmACWwlWLjPCJ4DULGHQrMkeBjp1Cd3Yuwx04lZgFx5Vg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.16.1.tgz", + "integrity": "sha512-KO+WGZjrh6zyFTD1alIFkfdtxf8B4BC+hqd3kBZHscPLvE5FR/6QKsyuCT0JlERxxYBSUKNUQ/UHyX5uwO1x2A==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.16.1.tgz", + "integrity": 
"sha512-NqxbllzIB1WoAo4ThUXVtd21iiM5IHMTTXmXySKBLVcZvkU0HIZmatlP7hLzb5yQubcmdIeWmncd2NdsjocEiw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.16.1.tgz", + "integrity": "sha512-snma5NvV8y7IECQ5rq0sr0f3UUu+92NVmG/913JXJMcXo84h9ak9TA5UI9Cl2XRM9j3m37QwDBtEYnJzRkSmxA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.16.1.tgz", + "integrity": "sha512-KOvqGprlD84ueivhCi2flvcUwDRD20mAsE3vxQNVEI2Di9tnPGAfEu6UcrSPZbM+jG2w1oSr43hrPo0RNg6GGg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.16.1.tgz", + "integrity": "sha512-/gsNwtiGLqYwN4vP+EIdUC6Q6LTlpupWqokqIndvZcjn9ig/5P01WyaYCU2wvfL/2Z82jp5kX8c1mDBOvCP3zg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.16.1.tgz", + "integrity": "sha512-uU8zuGkQfGqfD9w6VRJZI4IuG4JIfNxxJgEmLMAmPVHREKGsxFVfgHy5c6CexQF2vOfgjB33OsET3Vdn2lln9A==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.16.1.tgz", + "integrity": "sha512-lsjLtDgtcGFEuBP6yrXwkRN5/wKlvUZtfbKZZu0yaoNpiBL4epgnO21osAALIspVRnl4qZgyLFd8xjCYYWgwfw==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.16.1.tgz", + "integrity": "sha512-N2ZizKhUryqqrMfdCnjhJhZRgv61C6gK+hwVtCIKC8ts8J+go+vqENnGexwg21nHIOvLN5mBM8a7DI2vlyIOPg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.16.1.tgz", + "integrity": "sha512-5ICeMxqg66FrOA2AbnBQ2TJVxfvZsKLxmof0ibvPLaYtbsJqnTUtJOofgWb46Gjd4uZcA4rdsp4JCxegzQPqCg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.16.1.tgz", + "integrity": "sha512-1vIP6Ce02L+qWD7uZYRiFiuAJo3m9kARatWmFSnss0gZnVj2Id7OPUU9gm49JPGasgcR3xMqiH3fqBJ8t00yVg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.16.1.tgz", + "integrity": "sha512-Y3M92DcVsT6LoP+wrKpoUWPaazaP1fzbNkp0a0ZSj5Y//+pQVfVe/tQdsYQQy7dwXR30ZfALUIc9PCh9Izir6w==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ] + 
}, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.16.1.tgz", + "integrity": "sha512-x0fvpHMuF7fK5r8oZxSi8VYXkrVmRgubXpO/wcf15Lk3xZ4Jvvh5oG+u7Su1776A7XzVKZhD2eRc4t7H50gL3w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@scure/base": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.6.tgz", + "integrity": "sha512-ok9AWwhcgYuGG3Zfhyqg+zwl+Wn5uE+dwC0NV/2qQkx4dABbb/bx96vWu8NSj+BNjjSjno+JRYRjle1jV08k3g==", + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@scure/bip39": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.1.0.tgz", + "integrity": "sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "@noble/hashes": "~1.1.1", + "@scure/base": "~1.1.0" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@stacks/common": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", + "dependencies": { + "@types/bn.js": "^5.1.0", + "@types/node": "^18.0.4" + } + }, + "node_modules/@stacks/encryption": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@scure/bip39": "1.1.0", + "@stacks/common": "^6.13.0", + "@types/node": "^18.0.4", + "base64-js": "^1.5.1", + "bs58": "^5.0.0", + "ripemd160-min": "^0.0.6", + "varuint-bitcoin": "^1.1.2" + } + }, + "node_modules/@stacks/network": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", + "dependencies": { + "@stacks/common": "^6.13.0", + "cross-fetch": "^3.1.5" + } + }, + "node_modules/@stacks/stacking": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.14.0.tgz", + "integrity": "sha512-P6ITXYpb5q4hgWMPimJW84mih3hQuQ0ko7AcnJ4SPy17nt1rxEz7/zgyRnqg1Lc18zt4HqfF9SKM7+Sqt/EMZA==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@scure/base": "1.1.1", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacks-blockchain-api-types": "^0.61.0", + "@stacks/transactions": "^6.13.1", + "bs58": "^5.0.0" + } + }, + "node_modules/@stacks/stacking/node_modules/@scure/base": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz", + "integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ] + }, + "node_modules/@stacks/stacks-blockchain-api-types": { + 
"version": "0.61.0", + "resolved": "https://registry.npmjs.org/@stacks/stacks-blockchain-api-types/-/stacks-blockchain-api-types-0.61.0.tgz", + "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" + }, + "node_modules/@stacks/transactions": { + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", + "dependencies": { + "@noble/hashes": "1.1.5", + "@noble/secp256k1": "1.7.1", + "@stacks/common": "^6.13.0", + "@stacks/network": "^6.13.0", + "c32check": "^2.0.0", + "lodash.clonedeep": "^4.5.0" + } + }, + "node_modules/@types/bn.js": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.5.tgz", + "integrity": "sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/node": { + "version": "18.19.31", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", + "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@vitest/expect": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.5.2.tgz", + "integrity": "sha512-rf7MTD1WCoDlN3FfYJ9Llfp0PbdtOMZ3FIF0AVkDnKbp3oiMW1c8AmvRZBcqbAhDUAvF52e9zx4WQM1r3oraVA==", + "dependencies": { + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.5.2.tgz", + "integrity": "sha512-7IJ7sJhMZrqx7HIEpv3WrMYcq8ZNz9L6alo81Y6f8hV5mIE6yVZsFoivLZmr0D777klm1ReqonE9LyChdcmw6g==", + "dependencies": { + "@vitest/utils": "1.5.2", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.5.2.tgz", + "integrity": "sha512-CTEp/lTYos8fuCc9+Z55Ga5NVPKUgExritjF5VY7heRFUfheoAqBneUlvXSUJHUZPjnPmyZA96yLRJDP1QATFQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.5.2.tgz", + "integrity": "sha512-xCcPvI8JpCtgikT9nLpHPL1/81AYqZy1GCy4+MCHBE7xi8jgsYkULpW5hrx5PGLgOQjUpb6fd15lqcriJ40tfQ==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.5.2.tgz", + "integrity": "sha512-sWOmyofuXLJ85VvXNsroZur7mOJGiQeM0JN3/0D1uU8U9bGFM69X1iqHaRXl6R8BwaLY6yPCogP257zxTzkUdA==", + "dependencies": { + "diff-sequences": "^29.6.3", + "estree-walker": "^3.0.3", + "loupe": "^2.3.7", + 
"pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/assertion-error": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", + "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", + "engines": { + "node": "*" + } + }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/bs58": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/bs58/-/bs58-5.0.0.tgz", + 
"integrity": "sha512-r+ihvQJvahgYT50JD05dyJNKlmmSlMoOGwn1lCcEzanPglg7TxYjioQUYehQ9mAR/+hOSd2jRc/Z2y5UxBymvQ==", + "dependencies": { + "base-x": "^4.0.0" + } + }, + "node_modules/c32check": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/c32check/-/c32check-2.0.0.tgz", + "integrity": "sha512-rpwfAcS/CMqo0oCqDf3r9eeLgScRE3l/xHDCXhM3UyrfvIn7PrLq63uHh7yYbv8NzaZn5MVsVhIRpQ+5GZ5HyA==", + "dependencies": { + "@noble/hashes": "^1.1.2", + "base-x": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chai": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", + "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "dependencies": { + "assertion-error": "^1.1.0", + "check-error": "^1.0.3", + "deep-eql": "^4.1.3", + "get-func-name": "^2.0.2", + "loupe": "^2.3.6", + "pathval": "^1.1.1", + "type-detect": "^4.0.8" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/check-error": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", + "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", + "dependencies": { + "get-func-name": "^2.0.2" + }, + "engines": { + "node": "*" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar-cli": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chokidar-cli/-/chokidar-cli-3.0.0.tgz", + "integrity": "sha512-xVW+Qeh7z15uZRxHOkP93Ux8A0xbPzwK4GaqD8dQOYc34TlkqUhVSS59fK36DOp5WdJlrRzlYSy02Ht99FjZqQ==", + "dependencies": { + "chokidar": "^3.5.2", + "lodash.debounce": "^4.0.8", + "lodash.throttle": "^4.1.1", + "yargs": "^13.3.0" + }, + "bin": { + "chokidar": "index.js" + }, + "engines": { + "node": ">= 8.10.0" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-regex": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + 
"dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + "dependencies": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "node_modules/chokidar-cli/node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "node_modules/chokidar-cli/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/chokidar-cli/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dependencies": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/chokidar-cli/node_modules/y18n": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", + "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" + }, + "node_modules/chokidar-cli/node_modules/yargs": { + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", + "dependencies": { + "cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" + } + }, + "node_modules/chokidar-cli/node_modules/yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dependencies": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": 
"https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==" + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-eql": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", + "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "dependencies": { + "type-detect": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/esbuild": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz", + "integrity": 
"sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.20.2", + "@esbuild/android-arm": "0.20.2", + "@esbuild/android-arm64": "0.20.2", + "@esbuild/android-x64": "0.20.2", + "@esbuild/darwin-arm64": "0.20.2", + "@esbuild/darwin-x64": "0.20.2", + "@esbuild/freebsd-arm64": "0.20.2", + "@esbuild/freebsd-x64": "0.20.2", + "@esbuild/linux-arm": "0.20.2", + "@esbuild/linux-arm64": "0.20.2", + "@esbuild/linux-ia32": "0.20.2", + "@esbuild/linux-loong64": "0.20.2", + "@esbuild/linux-mips64el": "0.20.2", + "@esbuild/linux-ppc64": "0.20.2", + "@esbuild/linux-riscv64": "0.20.2", + "@esbuild/linux-s390x": "0.20.2", + "@esbuild/linux-x64": "0.20.2", + "@esbuild/netbsd-x64": "0.20.2", + "@esbuild/openbsd-x64": "0.20.2", + "@esbuild/sunos-x64": "0.20.2", + "@esbuild/win32-arm64": "0.20.2", + "@esbuild/win32-ia32": "0.20.2", + "@esbuild/win32-x64": "0.20.2" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": 
"sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-func-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", + "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", + "engines": { + "node": "*" + } + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": 
"sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/js-tokens": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.0.tgz", + "integrity": "sha512-WriZw1luRMlmV3LGJaR6QOJjWwgLUTf89OwT2lUOyjX2dJGBwgmIkbcz+7WFZjrZM635JOIR517++e/67CP9dQ==" + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==" + }, + "node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lodash.clonedeep": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", + "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==" + }, + "node_modules/loupe": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", + "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", + "dependencies": { + "get-func-name": "^2.0.1" + } + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + 
"engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mlly": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", + "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "dependencies": { + "acorn": "^8.11.3", + "pathe": "^1.1.2", + "pkg-types": "^1.0.3", + "ufo": "^1.3.2" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==" + }, + "node_modules/pathval": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", + "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", + "engines": { + "node": "*" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.0.tgz", + "integrity": "sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA==", + "dependencies": { + "confbox": "^0.1.7", + "mlly": "^1.6.1", + "pathe": "^1.1.2" + } + }, + "node_modules/postcss": { + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", 
+ "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/react-is": { + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.0.tgz", + "integrity": "sha512-wRiUsea88TjKDc4FBEn+sLvIDesp6brMbGWnJGjew2waAc9evdhja/2LvePc898HJbHw0L+MTWy7NhpnELAvLQ==" + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" + }, + "node_modules/ripemd160-min": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/ripemd160-min/-/ripemd160-min-0.0.6.tgz", + "integrity": "sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup": { + "version": "4.16.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.16.1.tgz", + "integrity": "sha512-5CaD3MPDlPKfhqzRvWXK96G6ELJfPZNb3LHiZxTHgDdC6jvwfGz2E8nY+9g1ONk4ttHsK1WaFP19Js4PSr1E3g==", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.16.1", + "@rollup/rollup-android-arm64": "4.16.1", + "@rollup/rollup-darwin-arm64": "4.16.1", + "@rollup/rollup-darwin-x64": "4.16.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.16.1", + "@rollup/rollup-linux-arm-musleabihf": "4.16.1", + "@rollup/rollup-linux-arm64-gnu": "4.16.1", + "@rollup/rollup-linux-arm64-musl": "4.16.1", + "@rollup/rollup-linux-powerpc64le-gnu": "4.16.1", + "@rollup/rollup-linux-riscv64-gnu": "4.16.1", + "@rollup/rollup-linux-s390x-gnu": "4.16.1", + "@rollup/rollup-linux-x64-gnu": "4.16.1", + "@rollup/rollup-linux-x64-musl": "4.16.1", + "@rollup/rollup-win32-arm64-msvc": "4.16.1", + "@rollup/rollup-win32-ia32-msvc": "4.16.1", + "@rollup/rollup-win32-x64-msvc": "4.16.1", + "fsevents": "~2.3.2" + } + }, + 
"node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==" + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.0.tgz", + "integrity": "sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==", + "dependencies": { + "js-tokens": "^9.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/tinybench": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==" + }, + "node_modules/tinypool": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz", + "integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", + "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/typescript": { + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", + "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + 
"node_modules/varuint-bitcoin": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/varuint-bitcoin/-/varuint-bitcoin-1.1.2.tgz", + "integrity": "sha512-4EVb+w4rx+YfVM32HQX42AbbT7/1f5zwAYhIujKXKk8NQK+JfRVl3pqT3hjNn/L+RstigmGGKVwHA/P0wgITZw==", + "dependencies": { + "safe-buffer": "^5.1.1" + } + }, + "node_modules/vite": { + "version": "5.2.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz", + "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==", + "dependencies": { + "esbuild": "^0.20.1", + "postcss": "^8.4.38", + "rollup": "^4.13.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.5.2.tgz", + "integrity": "sha512-Y8p91kz9zU+bWtF7HGt6DVw2JbhyuB2RlZix3FPYAYmUyZ3n7iTp8eSyLyY6sxtPegvxQtmlTMhfPhUfCUF93A==", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.5.2.tgz", + "integrity": "sha512-l9gwIkq16ug3xY7BxHwcBQovLZG75zZL0PlsiYQbf76Rz6QGs54416UWMtC0jXeihvHvcHrf2ROEjkQRVpoZYw==", + "dependencies": { + "@vitest/expect": "1.5.2", + "@vitest/runner": "1.5.2", + "@vitest/snapshot": "1.5.2", + "@vitest/spy": "1.5.2", + "@vitest/utils": "1.5.2", + "acorn-walk": "^8.3.2", + "chai": "^4.3.10", + "debug": "^4.3.4", + "execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^2.0.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.3", + "vite": "^5.0.0", + "vite-node": "1.5.2", + "why-is-node-running": "^2.2.2" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "1.5.2", + "@vitest/ui": "1.5.2", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vitest-environment-clarinet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-2.1.0.tgz", + "integrity": 
"sha512-1SA9XZh47qmbV724sGo2FyjVU+Ar3m5TOU4bLGSlWDb/x388IKUPrHbHWqIQNwY+gwEm9VBfXEAd1LOSUdemBw==", + "peerDependencies": { + "@hirosystems/clarinet-sdk": ">=2.6.0", + "vitest": "^1.5.2" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-module": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", + "integrity": "sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==" + }, + "node_modules/why-is-node-running": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", + "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/contrib/boot-contracts-unit-tests/package.json b/contrib/boot-contracts-unit-tests/package.json new file mode 100644 index 00000000000..ffd2108a075 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/package.json @@ -0,0 +1,26 @@ +{ + "name": "boot-contracts-unit-tests-tests", + "version": "1.0.0", + "description": "Run unit tests on this project.", + "type": "module", + "private": true, + "scripts": { + "test": "vitest run", + "test:report": "vitest run -- --coverage --costs", + "test:watch": "chokidar \"tests/**/*.ts\" \"contracts/**/*.clar\" -c \"npm run test:report\"" + }, + "author": "", + "license": "ISC", + "dependencies": { + "@hirosystems/clarinet-sdk": "^2.6.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", + "@stacks/stacking": "^6.14.0", + "@stacks/transactions": "^6.13.1", + "chokidar-cli": "^3.0.0", + "typescript": "^5.3.3", + "vite": "^5.1.4", + "vitest": "^1.5.2", + "vitest-environment-clarinet": "^2.1.0" + } +} diff --git a/contrib/boot-contracts-unit-tests/settings/Devnet.toml b/contrib/boot-contracts-unit-tests/settings/Devnet.toml new file mode 100644 index 00000000000..eb43b6be058 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/settings/Devnet.toml @@ -0,0 +1,151 @@ +[network] +name = "devnet" +deployment_fee_rate = 10 + +[accounts.deployer] +mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +balance = 100_000_000_000_000 +# secret_key: 753b7cc01a1a2e86221266a154af739463fce51219d97e4f856cd7200c3bd2a601 +# stx_address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM +# btc_address: mqVnk6NPRdhntvfm4hh9vvjiRkFDUuSYsH + +[accounts.wallet_1] +mnemonic = "sell invite acquire kitten bamboo drastic jelly vivid peace spawn twice guilt pave pen trash pretty park cube fragile unaware remain midnight betray rebuild" +balance = 100_000_000_000_000 +# secret_key: 7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801 +# stx_address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 +# btc_address: mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC + +[accounts.wallet_2] +mnemonic = "hold excess usual excess ring elephant install account glad dry fragile donkey gaze humble truck breeze 
nation gasp vacuum limb head keep delay hospital" +balance = 100_000_000_000_000 +# secret_key: 530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101 +# stx_address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG +# btc_address: muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG + +[accounts.wallet_3] +mnemonic = "cycle puppy glare enroll cost improve round trend wrist mushroom scorpion tower claim oppose clever elephant dinosaur eight problem before frozen dune wagon high" +balance = 100_000_000_000_000 +# secret_key: d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901 +# stx_address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC +# btc_address: mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7 + +[accounts.wallet_4] +mnemonic = "board list obtain sugar hour worth raven scout denial thunder horse logic fury scorpion fold genuine phrase wealth news aim below celery when cabin" +balance = 100_000_000_000_000 +# secret_key: f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701 +# stx_address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND +# btc_address: mg1C76bNTutiCDV3t9nWhZs3Dc8LzUufj8 + +[accounts.wallet_5] +mnemonic = "hurry aunt blame peanut heavy update captain human rice crime juice adult scale device promote vast project quiz unit note reform update climb purchase" +balance = 100_000_000_000_000 +# secret_key: 3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801 +# stx_address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB +# btc_address: mweN5WVqadScHdA81aATSdcVr4B6dNokqx + +[accounts.wallet_6] +mnemonic = "area desk dutch sign gold cricket dawn toward giggle vibrant indoor bench warfare wagon number tiny universe sand talk dilemma pottery bone trap buddy" +balance = 100_000_000_000_000 +# secret_key: 7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01 +# stx_address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 +# btc_address: mzxXgV6e4BZSsz8zVHm3TmqbECt7mbuErt + +[accounts.wallet_7] +mnemonic = "prevent gallery kind limb income control noise together echo rival record wedding sense uncover school version force bleak nuclear include danger skirt enact arrow" +balance = 100_000_000_000_000 +# secret_key: b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401 +# stx_address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ +# btc_address: n37mwmru2oaVosgfuvzBwgV2ysCQRrLko7 + +[accounts.wallet_8] +mnemonic = "female adjust gallery certain visit token during great side clown fitness like hurt clip knife warm bench start reunion globe detail dream depend fortune" +balance = 100_000_000_000_000 +# secret_key: 6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01 +# stx_address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP +# btc_address: n2v875jbJ4RjBnTjgbfikDfnwsDV5iUByw + +[accounts.faucet] +mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +balance = 100_000_000_000_000 +# secret_key: de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801 +# stx_address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 +# btc_address: mjSrB3wS4xab3kYqFktwBzfTdPg367ZJ2d + +[devnet] +disable_stacks_explorer = false +disable_stacks_api = false +# disable_subnet_api = false +# disable_bitcoin_explorer = true +# working_dir = "tmp/devnet" +# stacks_node_events_observers = ["host.docker.internal:8002"] +# miner_mnemonic = "fragile loan twenty basic net assault jazz absorb diet talk art shock innocent float punch travel gadget embrace caught blossom 
hockey surround initial reduce" +# miner_derivation_path = "m/44'/5757'/0'/0/0" +# faucet_mnemonic = "shadow private easily thought say logic fault paddle word top book during ignore notable orange flight clock image wealth health outside kitten belt reform" +# faucet_derivation_path = "m/44'/5757'/0'/0/0" +# orchestrator_port = 20445 +# bitcoin_node_p2p_port = 18444 +# bitcoin_node_rpc_port = 18443 +# bitcoin_node_username = "devnet" +# bitcoin_node_password = "devnet" +# bitcoin_controller_block_time = 30_000 +# stacks_node_rpc_port = 20443 +# stacks_node_p2p_port = 20444 +# stacks_api_port = 3999 +# stacks_api_events_port = 3700 +# bitcoin_explorer_port = 8001 +# stacks_explorer_port = 8000 +# postgres_port = 5432 +# postgres_username = "postgres" +# postgres_password = "postgres" +# postgres_database = "postgres" +# bitcoin_node_image_url = "quay.io/hirosystems/bitcoind:26.0" +# stacks_node_image_url = "quay.io/hirosystems/stacks-node:devnet-2.5" +# stacks_signer_image_url = "quay.io/hirosystems/stacks-node:devnet-2.5" +# stacks_api_image_url = "hirosystems/stacks-blockchain-api:master" +# stacks_explorer_image_url = "hirosystems/explorer:latest" +# bitcoin_explorer_image_url = "quay.io/hirosystems/bitcoin-explorer:devnet" +# postgres_image_url = "postgres:alpine" +# enable_subnet_node = true +# subnet_node_image_url = "hirosystems/stacks-subnets:0.8.1" +# subnet_leader_mnemonic = "twice kind fence tip hidden tilt action fragile skin nothing glory cousin green tomorrow spring wrist shed math olympic multiply hip blue scout claw" +# subnet_leader_derivation_path = "m/44'/5757'/0'/0/0" +# subnet_contract_id = "ST173JK7NZBA4BS05ZRATQH1K89YJMTGEH1Z5J52E.subnet-v3-0-1" +# subnet_node_rpc_port = 30443 +# subnet_node_p2p_port = 30444 +# subnet_events_ingestion_port = 30445 +# subnet_node_events_observers = ["host.docker.internal:8002"] +# subnet_api_image_url = "hirosystems/stacks-blockchain-api:master" +# subnet_api_postgres_database = "subnet_api" + +# For testing in epoch 2.1 / using Clarity2 +# epoch_2_0 = 100 +# epoch_2_05 = 100 +# epoch_2_1 = 101 +# epoch_2_2 = 102 +# epoch_2_3 = 103 +# epoch_2_4 = 104 +# epoch_2_5 = 108 + + +# Send some stacking orders +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_1" +slots = 2 +btc_address = "mr1iPkD9N3RJZZxXRk7xF9d36gffa6exNC" + +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_2" +slots = 1 +btc_address = "muYdXKmX9bByAueDe6KFfHd5Ff1gdN9ErG" + +[[devnet.pox_stacking_orders]] +start_at_cycle = 1 +duration = 12 +wallet = "wallet_3" +slots = 1 +btc_address = "mvZtbibDAAA3WLpY7zXXFqRa3T4XSknBX7" diff --git a/contrib/boot-contracts-unit-tests/tests/helpers.ts b/contrib/boot-contracts-unit-tests/tests/helpers.ts new file mode 100644 index 00000000000..9fb55187b29 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/helpers.ts @@ -0,0 +1,548 @@ +import { ClarityEvent } from "@hirosystems/clarinet-sdk"; +import { + getPublicKeyFromPrivate, + publicKeyToBtcAddress, +} from "@stacks/encryption"; +import { StacksDevnet } from "@stacks/network"; +import { + Pox4SignatureTopic, + StackingClient, + poxAddressToTuple, +} from "@stacks/stacking"; +import { + Cl, + ResponseOkCV, + StacksPrivateKey, + TransactionVersion, + TupleCV, + UIntCV, + createStacksPrivateKey, + getAddressFromPrivateKey, +} from "@stacks/transactions"; +import { expect } from "vitest"; + +export const POX_DEPLOYER = "ST000000000000000000002AMW42H"; +export const POX_CONTRACT = `${POX_DEPLOYER}.pox-4`; + +// Error 
codes from the contract +export const ERRORS = { + ERR_STACKING_UNREACHABLE: 255, + ERR_STACKING_CORRUPTED_STATE: 254, + ERR_STACKING_INSUFFICIENT_FUNDS: 1, + ERR_STACKING_INVALID_LOCK_PERIOD: 2, + ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_EXPIRED: 5, + ERR_STACKING_STX_LOCKED: 6, + ERR_STACKING_PERMISSION_DENIED: 9, + ERR_STACKING_THRESHOLD_NOT_MET: 11, + ERR_STACKING_POX_ADDRESS_IN_USE: 12, + ERR_STACKING_INVALID_POX_ADDRESS: 13, + ERR_STACKING_INVALID_AMOUNT: 18, + ERR_NOT_ALLOWED: 19, + ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_EXPIRES_DURING_LOCK: 21, + ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_DELEGATION_POX_ADDR_REQUIRED: 23, + ERR_INVALID_START_BURN_HEIGHT: 24, + ERR_NOT_CURRENT_STACKER: 25, + ERR_STACK_EXTEND_NOT_LOCKED: 26, + ERR_STACK_INCREASE_NOT_LOCKED: 27, + ERR_DELEGATION_NO_REWARD_SLOT: 28, + ERR_DELEGATION_WRONG_REWARD_SLOT: 29, + ERR_STACKING_IS_DELEGATED: 30, + ERR_STACKING_NOT_DELEGATED: 31, + ERR_INVALID_SIGNER_KEY: 32, + ERR_REUSED_SIGNER_KEY: 33, + ERR_DELEGATION_ALREADY_REVOKED: 34, + ERR_INVALID_SIGNATURE_PUBKEY: 35, + ERR_INVALID_SIGNATURE_RECOVER: 36, + ERR_INVALID_REWARD_CYCLE: 37, + ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH: 38, + ERR_SIGNER_AUTH_USED: 39, + ERR_INVALID_INCREASE: 40, +}; + +// Keys to use for stacking +// wallet_1, wallet_2, wallet_3 private keys +const stackingKeys = [ + "7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801", + "530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101", + "d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901", +]; + +export type StackerInfo = { + authId: number; + privKey: string; + pubKey: string; + stxAddress: string; + btcAddr: string; + signerPrivKey: StacksPrivateKey; + signerPubKey: string; + client: StackingClient; +}; + +export const stackers = Object.freeze( + stackingKeys.map((privKey, i) => { + const network = new StacksDevnet(); + + const pubKey = getPublicKeyFromPrivate(privKey); + const stxAddress = getAddressFromPrivateKey( + privKey, + TransactionVersion.Testnet + ); + const signerPrivKey = createStacksPrivateKey(privKey); + const signerPubKey = getPublicKeyFromPrivate(signerPrivKey.data); + + const info: StackerInfo = { + authId: i, + privKey, + pubKey, + stxAddress, + btcAddr: publicKeyToBtcAddress(pubKey), + signerPrivKey: signerPrivKey, + signerPubKey: signerPubKey, + client: new StackingClient(stxAddress, network), + }; + return info; + }) +); + +export const getPoxInfo = () => { + const poxInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-pox-info", + [], + simnet.deployer + ); + // @ts-ignore + const data = poxInfo.result.value.data; + const typedPoxInfo = { + firstBurnchainBlockHeight: data["first-burnchain-block-height"] + .value as bigint, + minAmountUstx: data["min-amount-ustx"].value as bigint, + prepareCycleLength: data["prepare-cycle-length"].value as bigint, + rewardCycleId: data["reward-cycle-id"].value as bigint, + rewardCycleLength: data["reward-cycle-length"].value as bigint, + totalLiquidSupplyUstx: data["total-liquid-supply-ustx"].value as bigint, + }; + + return typedPoxInfo; +}; + +export const getStackingMinimum = () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacking-minimum", + [], + simnet.deployer + ); + return (response.result as UIntCV).value; +}; + +export const burnHeightToRewardCycle = (burnHeight: bigint | number) => { + const poxInfo = getPoxInfo(); + return Number( + (BigInt(burnHeight) - poxInfo.firstBurnchainBlockHeight) / + 
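// BigInt division truncates like the contract's integer division; e.g. with first burnchain block 0 and cycle length 1050, burn height 2100 maps to cycle 2. +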
poxInfo.rewardCycleLength + ); +}; + +export const stackStx = ( + stacker: StackerInfo, + amount: bigint | number, + startBurnHeight: bigint | number, + lockPeriod: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(startBurnHeight); + const sigArgs = { + authId: authId, + maxAmount: maxAmount, + rewardCycle, + period: Number(lockPeriod), + topic: Pox4SignatureTopic.StackStx, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackStxArgs = [ + Cl.uint(amount), + poxAddressToTuple(stacker.btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, sender); +}; + +export const stackIncrease = ( + stacker: StackerInfo, + increaseBy: bigint | number, + lockPeriod: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: Number(lockPeriod), + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(increaseBy), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "stack-increase", + stackIncreaseArgs, + sender + ); +}; + +export const stackExtend = ( + stacker: StackerInfo, + extendCount: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: Number(extendCount), + topic: Pox4SignatureTopic.StackExtend, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(extendCount), + poxAddressToTuple(stacker.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + stackExtendArgs, + sender + ); +}; + +export const delegateStx = ( + amount: bigint | number, + delegateTo: string, + untilBurnHeight: bigint | number | null, + poxAddr: string | null, + sender: string +) => { + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(delegateTo), + untilBurnHeight ? Cl.some(Cl.uint(untilBurnHeight)) : Cl.none(), + poxAddr ? 
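/* optional pox address: encoded as (some <tuple>) when provided, none otherwise */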
Cl.some(poxAddressToTuple(poxAddr)) : Cl.none(), + ]; + + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + sender + ); +}; + +export const revokeDelegateStx = (sender: string) => { + return simnet.callPublicFn(POX_CONTRACT, "revoke-delegate-stx", [], sender); +}; + +export const delegateStackStx = ( + stacker: string, + amount: bigint | number, + poxAddr: string, + startBurnHeight: bigint | number, + lockPeriod: bigint | number, + sender: string +) => { + const delegateStackStxArgs = [ + Cl.principal(stacker), + Cl.uint(amount), + poxAddressToTuple(poxAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-stx", + delegateStackStxArgs, + sender + ); +}; + +export const delegateStackExtend = ( + stacker: string, + poxAddr: string, + extendCount: bigint | number, + sender: string +) => { + const delegateStackExtendArgs = [ + Cl.principal(stacker), + poxAddressToTuple(poxAddr), + Cl.uint(extendCount), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-extend", + delegateStackExtendArgs, + sender + ); +}; + +export const delegateStackIncrease = ( + stacker: string, + poxAddr: string, + increaseBy: bigint | number, + sender: string +) => { + const delegateStackIncreaseArgs = [ + Cl.principal(stacker), + poxAddressToTuple(poxAddr), + Cl.uint(increaseBy), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "delegate-stack-increase", + delegateStackIncreaseArgs, + sender + ); +}; + +export const allowContractCaller = ( + caller: string, + untilBurnHeight: bigint | number | null, + sender: string +) => { + const args = [ + Cl.principal(caller), + untilBurnHeight ? Cl.some(Cl.uint(untilBurnHeight)) : Cl.none(), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "allow-contract-caller", + args, + sender + ); +}; + +export const disallowContractCaller = (caller: string, sender: string) => { + const args = [Cl.principal(caller)]; + return simnet.callPublicFn( + POX_CONTRACT, + "disallow-contract-caller", + args, + sender + ); +}; + +export const stackAggregationCommitIndexed = ( + stacker: StackerInfo, + rewardCycle: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const period = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + args, + sender + ); +}; + +export const stackAggregationIncrease = ( + stacker: StackerInfo, + rewardCycle: bigint | number, + rewardCycleIndex: bigint | number, + maxAmount: bigint | number, + authId: bigint | number, + sender: string +) => { + const period = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: stacker.btcAddr, + signerPrivateKey: stacker.signerPrivKey, + }; + const signerSignature = stacker.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(stacker.signerPubKey); + + const args = 
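/* same shape as the stack-aggregation-commit-indexed args, plus the reward-set index of the entry being increased */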
[ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(rewardCycleIndex), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + args, + sender + ); +}; + +export const setSignerKeyAuthorization = ( + stacker: StackerInfo, + period: bigint | number, + rewardCycle: bigint | number, + topic: Pox4SignatureTopic, + allowed: boolean, + maxAmount: bigint | number, + authId: bigint | number +) => { + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + return simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + args, + stacker.stxAddress + ); +}; + +// Validate a pox-4 event and return the value of the event. +export const checkPox4Event = (event: ClarityEvent): TupleCV => { + expect(event.event).toEqual("print_event"); + expect(event.data.contract_identifier).toEqual(POX_CONTRACT); + expect(event.data.topic).toEqual("print"); + const value = (event.data.value! as ResponseOkCV).value; + return value as TupleCV; +}; + +// Validate the event that should be generated for a stack-* function, +// a delegate-stack-* function, or a delegate-stx function. +const checkStackOrDelegateEvent = ( + value: TupleCV, + name: string, + stacker: string, + balance: bigint, + locked: bigint, + burnchainUnlockHeight: bigint +) => { + const tuple = value.data; + expect(tuple["name"]).toBeAscii(name); + expect(tuple["stacker"]).toBePrincipal(stacker); + expect(tuple["balance"]).toBeUint(balance); + expect(tuple["locked"]).toBeUint(locked); + expect(tuple["burnchain-unlock-height"]).toBeUint(burnchainUnlockHeight); +}; + +// Validate the event that should be generated for a delegate-stx function. +export const checkDelegateStxEvent = ( + event: ClarityEvent, + stacker: string, + balance: bigint, + locked: bigint, + burnchainUnlockHeight: bigint, + amountUstx: bigint, + delegateTo: string, + poxAddr: string, + unlockBurnHeight: bigint +) => { + let value = checkPox4Event(event); + checkStackOrDelegateEvent( + value, + "delegate-stx", + stacker, + balance, + locked, + burnchainUnlockHeight + ); + const tuple = value.data; + const data = (tuple["data"] as TupleCV).data; + expect(data["amount-ustx"]).toBeUint(amountUstx); + expect(data["delegate-to"]).toBePrincipal(delegateTo); + if (poxAddr) { + expect(data["pox-addr"]).toBeSome(poxAddressToTuple(poxAddr)); + } else { + expect(data["pox-addr"]).toBeNone(); + } + if (unlockBurnHeight) { + expect(data["unlock-burn-height"]).toBeSome(Cl.uint(unlockBurnHeight)); + } else { + expect(data["unlock-burn-height"]).toBeNone(); + } +}; + +// Get the stacking state for a stacker. 
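+// The call's result is (some <stacker-info tuple>) while the stacker is locked, or none otherwise.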
+export const getStackerInfo = (stacker: string) => { + return simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker)], + simnet.deployer + ); +}; diff --git a/contrib/boot-contracts-unit-tests/tests/misc.test.ts b/contrib/boot-contracts-unit-tests/tests/misc.test.ts new file mode 100644 index 00000000000..d50f2ef6d38 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/misc.test.ts @@ -0,0 +1,1725 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { Cl, ClarityType } from "@stacks/transactions"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { + ERRORS, + POX_CONTRACT, + StackerInfo, + allowContractCaller, + delegateStackStx, + delegateStx, + getStackingMinimum, + setSignerKeyAuthorization, + stackAggregationCommitIndexed, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address3 = accounts.get("wallet_3")!; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("test `set-burnchain-parameters`", () => { + it("sets the parameters correctly", () => { + const response = simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + const fbbh = simnet.getDataVar( + POX_CONTRACT, + "first-burnchain-block-height" + ); + expect(fbbh).toBeUint(100); + + const ppcl = simnet.getDataVar(POX_CONTRACT, "pox-prepare-cycle-length"); + expect(ppcl).toBeUint(5); + + const prcl = simnet.getDataVar(POX_CONTRACT, "pox-reward-cycle-length"); + expect(prcl).toBeUint(20); + + const configured = simnet.getDataVar(POX_CONTRACT, "configured"); + expect(configured).toBeBool(true); + }); + + it("cannot be called twice", () => { + simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + const response = simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(101), Cl.uint(6), Cl.uint(21), Cl.uint(7)], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); +}); + +describe("test `burn-height-to-reward-cycle`", () => { + it("returns the correct reward cycle", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2099)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2100)], + address1 + ); + expect(response.result).toBeUint(2); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(2101)], + address1 + ); + expect(response.result).toBeUint(2); + }); + + it("returns the correct reward cycle with modified configuration", () => { + simnet.callPublicFn( + POX_CONTRACT, + "set-burnchain-parameters", + [Cl.uint(100), Cl.uint(5), Cl.uint(20), Cl.uint(6)], + address1 + ); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(1)], + address1 + ) + ).toThrowError(); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(99)], + address1 + ) + ).toThrowError(); + + let response = 
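/* heights below the configured first burnchain block underflow, hence the toThrowError assertions above */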
simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(100)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(101)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(119)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(120)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(121)], + address1 + ); + expect(response.result).toBeUint(1); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "burn-height-to-reward-cycle", + [Cl.uint(140)], + address1 + ); + expect(response.result).toBeUint(2); + }); +}); + +describe("test `reward-cycle-to-burn-height`", () => { + it("returns the correct burn height", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(1050); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(2)], + address1 + ); + expect(response.result).toBeUint(2100); + + expect(() => + simnet.callReadOnlyFn( + POX_CONTRACT, + "reward-cycle-to-burn-height", + [Cl.uint(340282366920938463463374607431768211455n)], + address1 + ) + ).toThrowError(); + }); +}); + +describe("test `current-pox-reward-cycle`", () => { + it("returns the correct reward cycle", () => { + let response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(0); + + simnet.mineEmptyBlocks(2099); + + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(1); + + simnet.mineEmptyBlock(); + response = simnet.callReadOnlyFn( + POX_CONTRACT, + "current-pox-reward-cycle", + [], + address1 + ); + expect(response.result).toBeUint(2); + }); +}); + +describe("test `get-stacker-info`", () => { + it("returns none when principal is not stacked", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info before stacked", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeSome( + Cl.tuple({ + "delegated-to": Cl.none(), + "first-reward-cycle": Cl.uint(1), + "lock-period": Cl.uint(6), + "pox-addr": poxAddressToTuple(stacker.btcAddr), + "reward-set-indexes": Cl.list([ + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + ]), + }) + ); + }); + + it("returns info while stacked", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + 
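// remaining stackStx helper args: amount, startBurnHeight, lockPeriod, maxAmount, authId, sender +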
amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + simnet.mineEmptyBlocks(2100); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeSome( + Cl.tuple({ + "delegated-to": Cl.none(), + "first-reward-cycle": Cl.uint(1), + "lock-period": Cl.uint(6), + "pox-addr": poxAddressToTuple(stacker.btcAddr), + "reward-set-indexes": Cl.list([ + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + Cl.uint(0), + ]), + }) + ); + }); + + it("returns none after stacking expired", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + let stackResponse = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + address1 + ); + expect(stackResponse.result.type).toBe(ClarityType.ResponseOk); + simnet.mineEmptyBlocks(7350); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacker-info", + [Cl.principal(stacker.stxAddress)], + address1 + ); + expect(response.result).toBeNone(); + }); +}); + +describe("test `check-caller-allowed`", () => { + it("returns true when called directly", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-caller-allowed", + [], + address1 + ); + expect(response.result).toBeBool(true); + }); + + it("returns false when called indirectly by an unapproved caller", () => { + const response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns true when called indirectly by an approved caller", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(true); + }); + + it("returns false when called indirectly by an approved caller whose allowance has expired", () => { + allowContractCaller(`${deployer}.indirect`, 10n, address1); + + let response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(true); + + // mine 11 blocks to expire the caller's allowance + simnet.mineEmptyBlocks(11); + + response = simnet.callReadOnlyFn( + "indirect", + "check-caller-allowed", + [], + address1 + ); + + expect(response.result).toBeBool(false); + }); +}); + +describe("test `get-reward-set-size`", () => { + it("returns 0 when no stacking has occurred", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns the number of stackers", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(1)], + address1 + ); + expect(responseCycle1.result).toBeUint(3); + + const responseCycle7 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(7)], + address1 + ); + expect(responseCycle7.result).toBeUint(0); + }); + + it("returns the number of unique pox addresses", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((_stacker) => { + const stacker: StackerInfo = { +
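// reuse stacker 0's pox address for every stacker so the reward set sees duplicate addresses +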
..._stacker, + btcAddr: stackers[0].btcAddr, + }; + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-size", + [Cl.uint(1)], + address1 + ); + expect(responseCycle1.result).toBeUint(3); // should it be 1? + }); +}); + +describe("test `get-total-ustx-stacked`", () => { + it("returns 0 when no stacking has occurred", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns total amount stacked", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + const { result } = stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(1)], + address1 + ); + expect(responseCycle1.result).toBeUint(amount * 3n); + }); + + it("returns 0 in the cycle before stacking starts", () => { + const amount = getStackingMinimum() * 2n; + + // stacking txs sent in cycle 0, so stacking will start in cycle 1 + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(0)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns total amount stacked", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(amount * 3n); + }); + + it("expires stacking after the stacking duration has finished", () => { + const amount = getStackingMinimum() * 2n; + + stackers.forEach((stacker, i) => { + const { result } = stackStx( + stacker, + amount, + 1000, + // wallet_1 will expire after 2 cycles, wallet_2 after 4, wallet_3 after 6 + (i + 1) * 2, + amount, + stacker.authId, + stacker.stxAddress + ); + expect(result).toHaveClarityType(ClarityType.ResponseOk); + }); + + const responseCycle3 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(3)], + address1 + ); + expect(responseCycle3.result).toBeUint(amount * 2n); + + const responseCycle5 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(5)], + address1 + ); + expect(responseCycle5.result).toBeUint(amount * 1n); + + const responseCycle7 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-total-ustx-stacked", + [Cl.uint(7)], + address1 + ); + expect(responseCycle7.result).toBeUint(0); + }); +}); + +describe("test `get-reward-set-pox-address`", () => { + it("returns none when there is no stacker", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(0), Cl.uint(0)], + address1 + ); + expect(result).toBeNone(); + }); + + it("returns pox address for a stacker", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, +
stacker.authId, + stacker.stxAddress + ); + }); + + const responseStacker0 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(1), Cl.uint(0)], + address1 + ); + expect(responseStacker0.result).toBeSome( + Cl.tuple({ + "pox-addr": poxAddressToTuple(stackers[0].btcAddr), + signer: Cl.bufferFromHex(stackers[0].signerPubKey), + stacker: Cl.some(Cl.principal(stackers[0].stxAddress)), + "total-ustx": Cl.uint(amount), + }) + ); + const responseStacker1 = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(1), Cl.uint(1)], + address1 + ); + expect(responseStacker1.result).toBeSome( + Cl.tuple({ + "pox-addr": poxAddressToTuple(stackers[1].btcAddr), + signer: Cl.bufferFromHex(stackers[1].signerPubKey), + stacker: Cl.some(Cl.principal(stackers[1].stxAddress)), + "total-ustx": Cl.uint(amount), + }) + ); + }); +}); + +describe("test `get-stacking-minimum`", () => { + it("returns the correct minimum amount", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-stacking-minimum", + [], + address1 + ); + expect(response.result).toBeUint(125000000000); + }); +}); + +describe("test `check-pox-addr-version`", () => { + it("returns true for a valid version", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-version", + [poxAddressToTuple(stackers[0].btcAddr).data.version], + address1 + ); + expect(result).toBeBool(true); + }); + + it("returns false for an invalid version (> 6)", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-version", + [Cl.buffer(Buffer.from([7]))], + address1 + ); + expect(result).toBeBool(false); + }); +}); + +describe("test `check-pox-addr-hashbytes`", () => { + it("returns true for a valid address", () => { + const segwitAddress = poxAddressToTuple( + "36op6KLxdjBeBXnkNPi59UDTT2yZZGBYDm" + ); + + const segwitCheck = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [segwitAddress.data.version, segwitAddress.data.hashbytes], + address1 + ); + expect(segwitCheck.result).toBeBool(true); + + const taprootAddress = poxAddressToTuple( + "bc1q82mfyran6u3y8r877vgkje45wlmvh85c7su3ljww9jv762znmrasn5ce59" + ); + + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [taprootAddress.data.version, taprootAddress.data.hashbytes], + address1 + ); + expect(result).toBeBool(true); + }); +}); + +describe("test `check-pox-lock-period`", () => { + it("returns true for a valid lock period", () => { + for (let i = 1; i <= 12; i++) { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(i)], + address1 + ); + expect(result).toBeBool(true); + } + }); + + it("returns false for a lock period of 0", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(0)], + address1 + ); + expect(result).toBeBool(false); + }); + + it("returns false for a lock period of 13", () => { + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-lock-period", + [Cl.uint(13)], + address1 + ); + expect(result).toBeBool(false); + }); +}); + +describe("test `can-stack-stx` and `minimal-can-stack-stx`", () => { + it("returns true for a valid stacker", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const canStackArgs = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + + const { result } = simnet.callReadOnlyFn( +
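// can-stack-stx adds the stacking-minimum threshold check on top of minimal-can-stack-stx +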
POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(result).toBeOk(Cl.bool(true)); + }); + + it("returns an error if the amount is too low", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() / 2n; + const canStackArgs = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + + const { result } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET)); + }); + + it("returns an error if the period is too low or too high", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const canStackArgsTooLow = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(0), // lock period + ]; + + const { result: resultTooLow } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgsTooLow, + address1 + ); + expect(resultTooLow).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const canStackArgsTooHigh = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(13), // lock period + ]; + + const { result: resultTooHigh } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgsTooHigh, + address1 + ); + expect(resultTooHigh).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns an error if the pox address is invalid", () => { + const addressTupleWrongVersion = Cl.tuple({ + hashbytes: Cl.buffer( + Buffer.from("j89046x7zv6pm4n00qgqp505nvljnfp6xfznyw") + ), + version: Cl.buffer(Buffer.from([7])), + }); + const amount = getStackingMinimum() * 2n; + const canStackArgs = [ + addressTupleWrongVersion, + Cl.uint(amount), + Cl.uint(1), // first reward cycle + Cl.uint(6), // lock period + ]; + const { result: resultWrongVersion } = simnet.callReadOnlyFn( + POX_CONTRACT, + "can-stack-stx", + canStackArgs, + address1 + ); + expect(resultWrongVersion).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("test `check-pox-addr-hashbytes`", () => { + it("returns true for a valid address", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [poxAddr.data.version, poxAddr.data.hashbytes], + address1 + ); + + expect(response.result).toBeBool(true); + }); + + it("returns false when a 20 byte hash is too short", () => { + let version = Cl.bufferFromHex("01"); + let hashbytes = Cl.bufferFromHex("deadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when a 20 byte hash is too long", () => { + let version = Cl.bufferFromHex("04"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when a 32 byte hash is too short", () => { + let version = Cl.bufferFromHex("05"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdead"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns 
false when a 32 byte hash is too long", () => { + let version = Cl.bufferFromHex("06"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeefdeadbeef01"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); + + it("returns false when the version is too high", () => { + let version = Cl.bufferFromHex("07"); + let hashbytes = Cl.bufferFromHex("deadbeefdeadbeefdeadbeefdeadbeef"); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "check-pox-addr-hashbytes", + [version, hashbytes], + address1 + ); + + expect(response.result).toBeBool(false); + }); +}); + +describe("test `minimal-can-stack-stx`", () => { + it("returns true for valid args", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error for a 0 amount", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const amount = 0n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("returns an error for an invalid lock period", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 13n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns an error for a bad address version", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["version"] = Cl.bufferFromHex("0a"); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); + + it("returns an error for a bad address hashbytes", () => { + const poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["hashbytes"] = Cl.bufferFromHex("deadbeef"); + const amount = 1000n; + const firstCycle = 1n; + const lockPeriod = 6n; + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "minimal-can-stack-stx", + [poxAddr, Cl.uint(amount), Cl.uint(firstCycle), Cl.uint(lockPeriod)], + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("test `verify-signer-key-sig`", () => { + it("returns `(ok true)` for a valid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, +
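// these fields must exactly match the arguments later passed to verify-signer-key-sig +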
topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns `(ok true)` for a valid prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + + simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + [ + poxAddr, + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(account.signerPubKey), + Cl.bool(true), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + account.stxAddress + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.none(), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error if the amount is too high", () => { + const account = stackers[0]; + const maxAmount = getStackingMinimum(); + const amount = maxAmount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH) + ); + }); + + it("returns an error for a used authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + 
Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); + + it("returns an error for an invalid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const invalidSignature = signerSignature.slice(0, -2); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(invalidSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_SIGNATURE_RECOVER) + ); + }); + + it("returns an error for a signature that does not match", () => { + const account = stackers[0]; + const account2 = stackers[1]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account2.signerPrivKey, + }; + const signerSignature = account2.client.signPoxSignature(sigArgs); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_SIGNATURE_PUBKEY) + ); + }); + + it("returns an error if no signature is passed and there is no prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "verify-signer-key-sig", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.none(), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); +}); + +describe("test `consume-signer-key-authorization`", () => { + it("returns `(ok true)` for a valid signature", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = 
Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + const response = simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error for a used authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(amount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); +}); + +describe("test `set-signer-key-authorization`", () => { + it("returns `(ok true)` for a valid authorization", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns `(ok false)` for a valid deauthorization", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = false; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeOk(Cl.bool(false)); + }); + + it("cannot be called indirectly by an unauthorized caller", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = false; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = 
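/* the indirect contract was never approved via allow-contract-caller, so this call is rejected */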
simnet.callPublicFn( + "indirect", + "set-signer-key-authorization", + args, + stacker.stxAddress + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("can be called indirectly by an authorized caller", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, stacker.stxAddress); + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "set-signer-key-authorization", + args, + stacker.stxAddress + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called by a different principal", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const args = [ + poxAddressToTuple(stacker.btcAddr), + Cl.uint(period), + Cl.uint(rewardCycle), + Cl.stringAscii(topic), + Cl.bufferFromHex(stacker.signerPubKey), + Cl.bool(allowed), + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "set-signer-key-authorization", + args, + address3 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error for a period of 0", () => { + const stacker = stackers[0]; + const period = 0; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("returns an error for a reward cycle in the past", () => { + const stacker = stackers[0]; + const period = 1; + const rewardCycle = 1; + const topic = Pox4SignatureTopic.AggregateCommit; + const allowed = true; + const maxAmount = getStackingMinimum() * 2n; + const authId = 1; + + simnet.mineEmptyBlocks(1050 * 2); + + const response = setSignerKeyAuthorization( + stacker, + period, + rewardCycle, + topic, + allowed, + maxAmount, + authId + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_INVALID_REWARD_CYCLE)); + }); +}); + +describe("test `get-num-reward-set-pox-addresses`", () => { + it("returns 0 when there are no stackers", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("returns the number of stackers", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(stackers.length); + }); + + it("returns the number of stackers for a specific reward cycle", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + 
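// locks start at cycle 1 and run for 6 cycles, so cycles 1 through 6 each contain all three stackers +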
stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(2)], + address1 + ); + expect(response.result).toBeUint(stackers.length); + }); + + it("returns 0 when there are expired stackers", () => { + const amount = getStackingMinimum() * 2n; + stackers.forEach((stacker) => { + stackStx( + stacker, + amount, + 1000, + 6, + amount, + stacker.authId, + stacker.stxAddress + ); + }); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(8)], + address1 + ); + expect(response.result).toBeUint(0); + }); + + it("handles delegated stacking", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address3, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address3 + ); + + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address3 + ); + + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-num-reward-set-pox-addresses", + [Cl.uint(1)], + address1 + ); + expect(response.result).toBeUint(1); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts b/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts new file mode 100644 index 00000000000..52ea0ae95cb --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/pool-delegate.test.ts @@ -0,0 +1,2867 @@ +import { assert, beforeEach, describe, expect, it } from "vitest"; + +import { + Cl, + ClarityType, + ResponseCV, + SomeCV, + TupleCV, + UIntCV, + cvToString, +} from "@stacks/transactions"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + delegateStackExtend, + delegateStackIncrease, + delegateStackStx, + delegateStx, + getPoxInfo, + getStackerInfo, + getStackingMinimum, + stackAggregationCommitIndexed, + stackAggregationIncrease, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("test `get-check-delegation`", () => { + it("returns none when principal is not delegated", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info after delegation", () => { + const amount = getStackingMinimum() * 2n; + + const untilBurnHeight = 10; + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + 
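// wallet_2 (address2) +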
), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("does not expire if no burn height limit is set", () => { + const amount = getStackingMinimum() * 2n; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + + simnet.mineEmptyBlocks(10_000); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns none after burn height expiration", () => { + const amount = getStackingMinimum() * 2n; + simnet.mineEmptyBlock(); + + const untilBurnHeight = 10; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + simnet.mineEmptyBlocks(2 + untilBurnHeight - simnet.blockHeight); + // a stacks block height of 12 means a burnchain block height of 11 + assert(simnet.blockHeight === 12); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-check-delegation", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeNone(); + }); +}); + +describe("test `get-delegation-info`", () => { + it("returns none when principal is not delegated", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns info after delegation", () => { + const amount = getStackingMinimum() * 2n; + + const untilBurnHeight = 10; + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("does not expire if no burn height limit is set", () => { + const amount = getStackingMinimum() * 2n; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + + simnet.mineEmptyBlocks(10_000); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal( + "ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG" + ), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns none after burn height expiration", () => { + const amount = getStackingMinimum() * 2n; + simnet.mineEmptyBlock(); + + const untilBurnHeight = 10; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + simnet.mineEmptyBlocks(2 + untilBurnHeight - simnet.blockHeight); + // a stacks block height of 12 means a burnchain block height of 11 + assert(simnet.blockHeight === 12); + + const delegateInfo = 
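/* past until-burn-ht the delegation no longer applies, so it reads back as none */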
simnet.callReadOnlyFn( + POX_CONTRACT, + "get-delegation-info", + [Cl.principal(address1)], + address1 + ); + expect(delegateInfo.result).toBeNone(); + }); +}); + +describe("test `get-allowance-contract-callers`", () => { + it("returns `none` when not allowed", () => { + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(response.result).toBeNone(); + }); + + it("returns `(some none)` when allowed indefinitely", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "until-burn-ht": Cl.none(), + }) + ); + }); + + it("returns `(some (some X))` when allowed until burn height X", () => { + const untilBurnHeight = 10; + allowContractCaller(`${deployer}.indirect`, untilBurnHeight, address1); + + const delegateInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(delegateInfo.result).toBeSome( + Cl.tuple({ + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ); + }); + + it("returns `none` when a different caller is allowed", () => { + allowContractCaller(`${deployer}.not-indirect`, null, address1); + const response = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-allowance-contract-callers", + [Cl.principal(address1), Cl.contractPrincipal(deployer, "indirect")], + address1 + ); + expect(response.result).toBeNone(); + }); +}); + +describe("test `delegate-stack-stx`", () => { + it("does not delegate if principal is not delegated", () => { + const amount = getStackingMinimum() * 2n; + const { result } = delegateStackStx( + address2, + amount, + stackers[0].btcAddr, + 1000, + 6, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("can call delegate-stack-stx", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error for stacking too early", () => { + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 3000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_INVALID_START_BURN_HEIGHT)); + }); + + it("cannot be called indirectly by an unapproved caller", () => { + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + + const response = simnet.callPublicFn( + "indirect", + "delegate-stack-stx", + [ + Cl.principal(address1), + Cl.uint(amount), + poxAddressToTuple(stackers[0].btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ], + address2 + ); + expect(response.result).toBeErr( + 
Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an approved caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + allowContractCaller(`${deployer}.indirect`, null, address2); + + const response = simnet.callPublicFn( + "indirect", + "delegate-stack-stx", + [ + Cl.principal(address1), + Cl.uint(amount), + poxAddressToTuple(account.btcAddr), + Cl.uint(startBurnHeight), + Cl.uint(lockPeriod), + ], + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error if not delegated", () => { + const amount = getStackingMinimum() * 2n; + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("returns an error if delegated to someone else", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address3 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("returns an error if stacking more than delegated", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount + 1n, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED)); + }); + + it("returns an error if stacking to a different pox address", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[1].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_POX_ADDR_REQUIRED)); + }); + + it("can call delegate-stack-stx when no pox address was set", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, null, null, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(7350), + }) + ); + }); + + it("returns an error if stacking beyond the delegation height", () => { + const amount = getStackingMinimum() * 2n; + delegateStx(amount, address2, 2000, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_DELEGATION_EXPIRES_DURING_LOCK)); + }); + + it("returns an error if stacker is already stacked", () => { + const stacker = stackers[0]; + const amount = getStackingMinimum() * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + 
address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED)); + }); + + it("returns an error if stacker does not have enough unlocked STX", () => { + const stacker = stackers[0]; + const amount = + simnet.getAssetsMap().get("STX")?.get(stacker.stxAddress)! + 10n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS)); + }); + + it("returns an error if amount is 0", () => { + const stacker = stackers[0]; + const amount = 0; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(amount, address2, null, stackers[0].btcAddr, address1); + const { result } = delegateStackStx( + address1, + amount, + stacker.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); +}); + +describe("test `stack-aggregation-commit-indexed`", () => { + it("returns `(ok uint)` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + const { result } = delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.uint(0)); + }); + + it("returns an error when there is no partially stacked STX", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + }); + + it("returns an error when called by an unauthorized caller", () => { + const account = stackers[0]; + const 
amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.uint(0)); + }); + + it("returns an error when called with no signature or prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const authId = 1; + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.none(), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error when the stacking threshold is not met", () => { + const account = stackers[0]; + const amount = getStackingMinimum() / 2n; + const maxAmount = amount * 4n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, 
+ period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + let response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit-indexed", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET) + ); + }); +}); + +describe("test `stack-aggregation-commit`", () => { + it("returns `(ok true)` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + const { result } = delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error when there is no partially stacked STX", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + }); + + it("returns an error when called by an unauthorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: 
Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + "indirect", + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("returns an error when called with no signature or prior authorization", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const authId = 1; + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.none(), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_NOT_ALLOWED)); + }); + + it("returns an error when the stacking threshold is not met", () => { + const account = stackers[0]; + const amount = getStackingMinimum() / 2n; + const maxAmount = amount * 4n; + delegateStx(amount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const poxAddr = poxAddressToTuple(account.btcAddr); + const rewardCycle = 1; + const period = 1; + const authId = 1; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.AggregateCommit, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + let response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-commit", + [ + poxAddr, + Cl.uint(rewardCycle), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + 
Cl.uint(maxAmount), + Cl.uint(authId), + ], + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_THRESHOLD_NOT_MET) + ); + }); +}); + +describe("test `delegate-stack-increase`", () => { + it("returns `(ok {stacker, total-locked})` on success", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + }); + + it("can be called after committing", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + + // the amount in the reward set should not update until after + // the delegator calls `stack-aggregation-increase` + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("cannot be called if not delegated", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return an `ERR_STACKING_NOT_DELEGATED` error. 
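+ // (A hedged reading, not verified against the contract source: with no + // delegation there is no stacker state to add to, so the unsigned subtraction + // underflows and simnet surfaces it as the thrown runtime error asserted below.)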
+ expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("cannot be called if not stacked", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return an `ERR_STACKING_NOT_DELEGATED` error. + expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("cannot be called in last cycle of delegation", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + // mine enough blocks to reach the last cycle of the delegation + simnet.mineEmptyBlocks(6 * cycleLength); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + }); + + it("cannot be called after delegation has expired", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + // mine enough blocks to end the delegation + simnet.mineEmptyBlocks(7 * cycleLength); + + // Arithmetic underflow is not caught gracefully, so this triggers a runtime error. + // Preferably, it would return an `ERR_STACKING_NOT_DELEGATED` error. 
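+ // (Same assumption as the not-delegated case above: once the lock has lapsed, + // the stacker state is gone and the same underflow path is hit.)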
+ expect(() => + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ) + ).toThrow(); + }); + + it("requires a positive increase amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + 0, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("cannot be called indirectly by an unauthorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + const delegateStackIncreaseArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(maxAmount - amount), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-increase", + delegateStackIncreaseArgs, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + const delegateStackIncreaseArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(maxAmount - amount), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-increase", + delegateStackIncreaseArgs, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount), + }) + ); + }); + + it("cannot be called for a solo stacker", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const authId = 1; + + stackStx( + account, + amount, + startBurnHeight, + lockPeriod, + maxAmount, + authId, + account.stxAddress + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_NOT_DELEGATED)); + }); + + it("can only be called by the delegate", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + 
account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address3 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can increase to the total account balance", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const balance = simnet.getAssetsMap().get("STX")?.get(account.stxAddress)!; + + delegateStx(balance, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + balance - amount, + address2 + ); + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(balance), + }) + ); + }); + + it("cannot increase to more than the total account balance", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const startBurnHeight = 1000; + const lockPeriod = 6; + const balance = simnet.getAssetsMap().get("STX")?.get(account.stxAddress)!; + + delegateStx(balance, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + balance - amount + 1n, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS) + ); + }); + + it("cannot increase to more than the delegated amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const startBurnHeight = 1000; + const lockPeriod = 6; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + startBurnHeight, + lockPeriod, + address2 + ); + + let response = delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED) + ); + }); +}); + +describe("test `stack-aggregation-increase`", () => { + it("returns `(ok uint)` and increases stacked amount on success", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + // check the amount in the reward set + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + 
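+ // (still the originally committed amount: `delegate-stack-increase` only adds + // partially stacked STX; the entry updates in `stack-aggregation-increase` below)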
expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("cannot be called indirectly from an unauthorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + response = simnet.callPublicFn( + "indirect", + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("can be called indirectly from an authorized caller", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, address2); + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = 
account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + response = simnet.callPublicFn( + "indirect", + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeOk(Cl.bool(true)); + + // check that the amount was increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(maxAmount); + }); + + it("returns an error for current reward cycle", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + maxAmount, + authId, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("returns an error for switching pox address", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: stackers[1].btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const args = [ + poxAddressToTuple(stackers[1].btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + 
args, + address2 + ); + // Note: I don't think it is possible to reach the `ERR_DELEGATION_WRONG_REWARD_SLOT` error + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL) + ); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("cannot increase more than the authorized amount", () => { + const account = stackers[0]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const authAmount = minAmount * 3n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + authAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + // check the amount in the reward set + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + let tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + + response = stackAggregationIncrease( + account, + rewardCycle, + index, + authAmount, + authId, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH) + ); + + // check that the amount was not increased + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); + + it("cannot change signers", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const minAmount = getStackingMinimum(); + const amount = minAmount * 2n; + const authAmount = minAmount * 3n; + const maxAmount = minAmount * 4n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + let response = stackAggregationCommitIndexed( + account, + rewardCycle, + authAmount, + authId, + address2 + ); + expect(response.result.type).toBe(ClarityType.ResponseOk); + let index = ((response.result as ResponseCV).value as UIntCV).value; + + delegateStackIncrease( + account.stxAddress, + account.btcAddr, + maxAmount - amount, + address2 + ); + + const sigArgs = { + authId, + maxAmount, + rewardCycle: Number(rewardCycle), + period: Number(period), + topic: Pox4SignatureTopic.AggregateIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account1.signerPrivKey, + }; + const signerSignature = account1.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account1.signerPubKey); + + const args = [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.uint(index), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + 
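+ // the signature was produced with stackers[1]'s key, while the commit above + // used stackers[0]'s, so the call below must reject the signer change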
response = simnet.callPublicFn( + POX_CONTRACT, + "stack-aggregation-increase", + args, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_INVALID_SIGNER_KEY)); + + // check that the amount was not increased + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-reward-set-pox-address", + [Cl.uint(rewardCycle), Cl.uint(index)], + address2 + ); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["total-ustx"]).toBeUint(amount); + }); +}); + +describe("test `delegate-stack-extend`", () => { + it("returns `(ok {stacker, unlock-burn-height})` on success", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend after commit", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend after lock has started", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + 
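+ // lock-period 7 = the original 1-cycle lock plus the 6-cycle extension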
expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend multiple times", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 2, + address2 + ); + // unlock height should be cycle 4: 4 * 1050 = 4200 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(4200), + }) + ); + + response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 7: 7 * 1050 = 7350 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(6); + }); + + it("can extend multiple times while locked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + simnet.mineEmptyBlocks(1100); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 2, + address2 + ); + // unlock height should be cycle 4: 4 * 1050 = 4200 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(4200), + }) + ); + + simnet.mineEmptyBlocks(3000); + + response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 7: 7 * 1050 = 7350 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(7350), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(3); + expect(tuple.data["lock-period"]).toBeUint(4); + }); + + it("cannot extend 0 cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 0, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend beyond 12 cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, 
account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 12, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot be called indirectly by an unauthorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + const delegateStackExtendArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(6), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-extend", + delegateStackExtendArgs, + address2 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("can be called indirectly by an authorized caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + allowContractCaller(`${deployer}.indirect`, null, address2); + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + const delegateStackExtendArgs = [ + Cl.principal(account.stxAddress), + poxAddressToTuple(account.btcAddr), + Cl.uint(6), + ]; + let response = simnet.callPublicFn( + "indirect", + "delegate-stack-extend", + delegateStackExtendArgs, + address2 + ); + + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("cannot extend if not locked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalNone); + }); + + it("cannot extend after lock has expired", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, 
account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + period, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + simnet.mineEmptyBlocks(2200); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result).toBeNone(); + }); + + it("cannot extend at unlock height", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const period = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + period, + address2 + ); + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + // mine until the unlock height + simnet.mineEmptyBlocks(2100 - simnet.blockHeight); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address2 + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACK_EXTEND_NOT_LOCKED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result).toBeNone(); + }); + + it("cannot extend a solo-stacked stacker", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const period = 1; + const authId = 1; + + stackStx( + account, + amount, + 1000, + period, + maxAmount, + authId, + account.stxAddress + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + account.stxAddress + ); + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_STACKING_NOT_DELEGATED)); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend a stacker not delegated to the caller", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 6, + address3 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("cannot extend to a different pox addr", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + stackers[1].btcAddr, + 6, + address2 + );
be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_POX_ADDR_REQUIRED) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); + + it("can extend to a different pox addr if one was not specified", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, null, null, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + stackers[1].btcAddr, + 6, + address2 + ); + // unlock height should be cycle 8: 8 * 1050 = 8400 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(8400), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(7); + }); + + it("can extend within the delegation window", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, 5250, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + // unlock height should be cycle 5: 5 * 1050 = 5250 + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(5250), + }) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(4); + }); + + it("cannot extend outside the delegation window", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + + delegateStx(maxAmount, address2, 5249, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let response = delegateStackExtend( + account.stxAddress, + account.btcAddr, + 3, + address2 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_EXPIRES_DURING_LOCK) + ); + + const info = getStackerInfo(account.stxAddress); + expect(info.result.type).toBe(ClarityType.OptionalSome); + const tuple = (info.result as SomeCV).value as TupleCV; + expect(tuple.data["first-reward-cycle"]).toBeUint(1); + expect(tuple.data["lock-period"]).toBeUint(1); + }); +}); + +describe("test `get-partial-stacked-by-cycle`", () => { + it("returns the correct amount", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + const info = simnet.callReadOnlyFn( + 
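+      // `get-partial-stacked-by-cycle` is a read-only getter taking
+      // (pox-addr, reward-cycle, sender); it returns
+      // (optional { stacked-amount: uint }), and `none` once the partial
+      // amount has been aggregate-committed (see the tests below)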
POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + }); + + it("returns `none` when there are no partially stacked STX", () => { + const account = stackers[0]; + const rewardCycle = 1; + + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeNone(); + }); + + it("returns `none` after fully stacked", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 1; + const authId = 1; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 1, + address2 + ); + + let info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + + stackAggregationCommitIndexed( + account, + rewardCycle, + maxAmount, + authId, + address2 + ); + + info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeNone(); + }); + + it("returns the correct amount for multiple cycles", () => { + const account = stackers[0]; + const amount = getStackingMinimum() * 2n; + const maxAmount = amount * 2n; + const rewardCycle = 4; + + delegateStx(maxAmount, address2, null, account.btcAddr, account.stxAddress); + delegateStackStx( + account.stxAddress, + amount, + account.btcAddr, + 1000, + 6, + address2 + ); + + const info = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-partial-stacked-by-cycle", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.principal(address2), + ], + address2 + ); + expect(info.result).toBeSome( + Cl.tuple({ + "stacked-amount": Cl.uint(amount), + }) + ); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts b/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts new file mode 100644 index 00000000000..eb609ca0ad0 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/pool-stacker.test.ts @@ -0,0 +1,377 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + checkDelegateStxEvent, + delegateStx, + disallowContractCaller, + revokeDelegateStx, + stackers, +} from "./helpers"; +import { Cl } from "@stacks/transactions"; +import { poxAddressToTuple } from "@stacks/stacking"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; +const initial_balance = 100000000000000n; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("delegate-stx", () => { + const amount = 1000000; + const untilBurnHeight = 1000; + + it("returns `(ok true)` on success", () => { + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + 
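+    // `delegateStx` (from ./helpers) wraps pox-4 `delegate-stx`; judging from
+    // its call sites, the assumed argument order is
+    // (amount, delegate-to, until-burn-ht | null, pox-addr | null, sender).
+    // A hypothetical equivalent direct call would be:
+    //   simnet.callPublicFn(POX_CONTRACT, "delegate-stx",
+    //     [Cl.uint(amount), Cl.principal(address2),
+    //      Cl.some(Cl.uint(untilBurnHeight)),
+    //      Cl.some(poxAddressToTuple(stackers[0].btcAddr))], address1);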
expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("can omit the `until-burn-ht`", () => { + const delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("can omit the `pox-addr`", () => { + const delegateResponse = delegateStx( + amount, + address2, + null, + null, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("emits the correct event on success", () => { + const delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.events).toHaveLength(1); + let event = delegateResponse.events[0]; + checkDelegateStxEvent( + event, + address1, + initial_balance, + 0n, + 0n, + BigInt(amount), + address2, + stackers[0].btcAddr, + BigInt(untilBurnHeight) + ); + }); + + it("fails if the account is already delegated", () => { + let delegateResponse = delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + delegateResponse = delegateStx( + amount, + address3, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_DELEGATED) + ); + }); + + it("fails if called indirectly through an unapproved contract", () => { + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.none(), + ]; + + const delegateResponse = simnet.callPublicFn( + "indirect", + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly through an approved contract", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.none(), + ]; + + const delegateResponse = simnet.callPublicFn( + "indirect", + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("fails if the pox address version is invalid", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["version"] = Cl.bufferFromHex("0a"); + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.some(poxAddr), + ]; + + const delegateResponse = simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); + + it("fails if the pox address hashbytes is invalid", () => { + let poxAddr = poxAddressToTuple(stackers[0].btcAddr); + poxAddr.data["hashbytes"] = Cl.bufferFromHex("deadbeef"); + const delegateStxArgs = [ + Cl.uint(amount), + Cl.principal(address2), + Cl.none(), + Cl.some(poxAddr), + ]; + + const delegateResponse = simnet.callPublicFn( + POX_CONTRACT, + "delegate-stx", + delegateStxArgs, + address1 + ); + + expect(delegateResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INVALID_POX_ADDRESS) + ); + }); +}); + +describe("revoke-delegate-stx", () => { + it("returns prior state on success", () => { + const amount = 1000000; + const untilBurnHeight = 123; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + 
"delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + }); + + it("fails if the account is not delegated", () => { + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails if the delegation was already revoked", () => { + const amount = 1000000; + const untilBurnHeight = 123; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + + // First revoke passes + let revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + + // Second revoke fails + revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails if the delegation has expired", () => { + const amount = 1000000; + const untilBurnHeight = 3; + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + while (simnet.blockHeight <= untilBurnHeight) { + simnet.mineEmptyBlock(); + } + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_DELEGATION_ALREADY_REVOKED) + ); + }); + + it("fails when called by unapproved caller", () => { + const revokeResponse = simnet.callPublicFn( + "indirect", + "revoke-delegate-stx", + [], + address1 + ); + + expect(revokeResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("passes when called by approved caller", () => { + const amount = 1000000; + const untilBurnHeight = 123; + + delegateStx( + amount, + address2, + untilBurnHeight, + stackers[0].btcAddr, + address1 + ); + allowContractCaller(`${deployer}.indirect`, null, address1); + + const revokeResponse = simnet.callPublicFn( + "indirect", + "revoke-delegate-stx", + [], + address1 + ); + + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.some(Cl.uint(untilBurnHeight)), + }) + ) + ); + }); +}); + +describe("allow-contract-caller", () => { + it("returns `(ok true)` on success", () => { + const response = allowContractCaller( + `${deployer}.indirect`, + null, + address1 + ); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called indirectly", () => { + const response = simnet.callPublicFn( + "indirect", + "allow-contract-caller", + [Cl.principal(`${deployer}.indirect`), Cl.none()], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); +}); + +describe("disallow-contract-caller", () => { + it("returns `(ok true)` on success", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = disallowContractCaller(`${deployer}.indirect`, address1); + expect(response.result).toBeOk(Cl.bool(true)); + }); + + it("cannot be called indirectly", () => { + const response = simnet.callPublicFn( + "indirect", + "disallow-contract-caller", + [Cl.principal(`${deployer}.indirect`)], + address1 + ); + expect(response.result).toBeErr( + 
Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("cannot be called indirectly, even by an approved caller", () => { + allowContractCaller(`${deployer}.indirect`, null, address1); + const response = simnet.callPublicFn( + "indirect", + "disallow-contract-caller", + [Cl.principal(`${deployer}.indirect`)], + address1 + ); + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("returns `(ok false)` if the caller was not allowed", () => { + const response = disallowContractCaller(`${deployer}.indirect`, address1); + expect(response.result).toBeOk(Cl.bool(false)); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts b/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts new file mode 100644 index 00000000000..a5ae2fe5b28 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/scenarios.test.ts @@ -0,0 +1,345 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { + ERRORS, + delegateStackExtend, + delegateStackIncrease, + delegateStackStx, + delegateStx, + getPoxInfo, + getStackingMinimum, + revokeDelegateStx, + stackers, +} from "./helpers"; +import { Cl } from "@stacks/transactions"; +import { poxAddressToTuple } from "@stacks/stacking"; + +const accounts = simnet.getAccounts(); +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; +const address3 = accounts.get("wallet_3")!; + +beforeEach(() => { + simnet.setEpoch("3.0"); +}); + +describe("switching delegates`", () => { + it("is allowed while stacked", () => { + const amount = getStackingMinimum() * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + amount, + stackers[0].btcAddr, + simnet.blockHeight, + 4, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(amount), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(5250), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + }); + + it("revoked delegate cannot extend or increase", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(3150), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + 
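+        // prior delegation state, as registered by `delegate-stx` above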
"pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 tries to extend + let extendResponse = delegateStackExtend( + address1, + stackers[0].btcAddr, + 1n, + address2 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address2 tries to increase + let increaseResponse = delegateStackIncrease( + address1, + stackers[0].btcAddr, + 100n, + address2 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("new delegate cannot lock before previous delegation unlocks", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + const poxInfo = getPoxInfo(); + let unlockHeight = poxInfo.rewardCycleLength * 3n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + let delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(delegateStackStxResponse.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(unlockHeight), + }) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address3 tries to re-stack + delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum, + stackers[1].btcAddr, + simnet.blockHeight, + 2, + address3 + ); + expect(delegateStackStxResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED) + ); + + // Address3 can stack after unlock + simnet.mineEmptyBlocks(Number(unlockHeight) - simnet.blockHeight + 1); + unlockHeight = poxInfo.rewardCycleLength * 6n; + + delegateStackStxResponse = delegateStackStx( + address1, + stackingMinimum + 2n, + stackers[1].btcAddr, + simnet.blockHeight, + 2, + address3 + ); + expect(delegateStackStxResponse.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum + 2n), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(unlockHeight), + }) + ); + }); + + it("New delegate cannot extend or increase", () => { + const stackingMinimum = getStackingMinimum(); + const amount = stackingMinimum * 2n; + + // Delegate to address2 + let delegateResponse = delegateStx( + amount, + address2, + null, + stackers[0].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address2 stacks + const { result } = delegateStackStx( + address1, + stackingMinimum, + stackers[0].btcAddr, + simnet.blockHeight, + 2, + address2 + ); + expect(result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(stackingMinimum), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(3150), + 
}) + ); + + // Revoke delegation to address2 + const revokeResponse = revokeDelegateStx(address1); + expect(revokeResponse.result).toBeOk( + Cl.some( + Cl.tuple({ + "amount-ustx": Cl.uint(amount), + "delegated-to": Cl.principal(address2), + "pox-addr": Cl.some(poxAddressToTuple(stackers[0].btcAddr)), + "until-burn-ht": Cl.none(), + }) + ) + ); + + // Delegate to address3 + delegateResponse = delegateStx( + amount, + address3, + null, + stackers[1].btcAddr, + address1 + ); + expect(delegateResponse.result).toBeOk(Cl.bool(true)); + + // Address3 tries to extend to same pox address + let extendResponse = delegateStackExtend( + address1, + stackers[0].btcAddr, + 1n, + address3 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to extend to new pox address + extendResponse = delegateStackExtend( + address1, + stackers[1].btcAddr, + 1n, + address3 + ); + expect(extendResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to increase with same pox address + let increaseResponse = delegateStackIncrease( + address1, + stackers[0].btcAddr, + 100n, + address3 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + + // Address3 tries to increase with new pox address + increaseResponse = delegateStackIncrease( + address1, + stackers[1].btcAddr, + 100n, + address3 + ); + expect(increaseResponse.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts b/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts new file mode 100644 index 00000000000..b3531593b4e --- /dev/null +++ b/contrib/boot-contracts-unit-tests/tests/solo-stacker.test.ts @@ -0,0 +1,1464 @@ +import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; +import { describe, expect, it, beforeEach, assert } from "vitest"; + +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { Simnet } from "@hirosystems/clarinet-sdk"; +import { + ERRORS, + POX_CONTRACT, + allowContractCaller, + burnHeightToRewardCycle, + delegateStackStx, + delegateStx, + getPoxInfo, + stackExtend, + stackIncrease, + stackStx, + stackers, +} from "./helpers"; + +const accounts = simnet.getAccounts(); +const deployer = accounts.get("deployer")!; +const address1 = accounts.get("wallet_1")!; +const address2 = accounts.get("wallet_2")!; + +const initialSTXBalance = 100_000_000 * 1e6; + +const maxAmount = 20960000000000; + +const getTotalStacked = ( + simnet: Simnet, + poxContract: string, + cycleId: number | bigint +) => { + const totalStacked = simnet.callReadOnlyFn( + poxContract, + "get-total-ustx-stacked", + [Cl.uint(cycleId)], + address1 + ); + // @ts-ignore + return totalStacked.result.value as bigint; +}; + +const stackingThreshold = 125000000000; + +describe("pox-4", () => { + beforeEach(async () => { + simnet.setEpoch("3.0"); + }); + + it("can call get-pox-info", async () => { + const poxInfo = simnet.callReadOnlyFn( + POX_CONTRACT, + "get-pox-info", + [], + address1 + ); + assert(isClarityType(poxInfo.result, ClarityType.ResponseOk)); + }); + + /* + (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint) + (signer-sig (optional (buff 65))) + (signer-key (buff 33)) + (max-amount uint) + (auth-id uint)) + */ + + describe("stack-stx", () => { + it("can stack stxs", async () => { + const 
account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": Cl.bufferFromHex(account.signerPubKey), + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet(`(stx-account '${address1})`); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + }); + + it("unlocks stxs after period is ended", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 2; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = initialSTXBalance * 0.2; // lock 20% of total balance + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // try to transfer 90% of balance (should fail because 20% is locked) + const { result: resultErr } = simnet.transferSTX( + initialSTXBalance * 0.9, + address2, + address1 + ); + expect(resultErr).toBeErr(Cl.uint(1)); + + simnet.mineEmptyBlocks(4000); + + const stxAccount = simnet.runSnippet(`(stx-account '${address1})`); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(0), + unlocked: Cl.uint(initialSTXBalance), + "unlock-height": Cl.uint(0), + }); + + // try to transfer 90% of balance (should succeed because period is ended) + const { result: resultOk } = simnet.transferSTX( + initialSTXBalance * 0.9, + address2, + address1 + ); + expect(resultOk).toBeOk(Cl.bool(true)); + }); + + it("can stack stxs from multiple accounts with the same key", () => { + const signerAccount = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 0; + const period = 10; + + const signerAccountKey = Cl.bufferFromHex(signerAccount.signerPubKey); + + let i = 0; + for (const account of stackers) { + const authId = i; + i++; + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: signerAccount.signerPrivKey, + }; + const signerSignature = 
signerAccount.client.signPoxSignature(sigArgs); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerAccountKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + account.stxAddress + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": Cl.bufferFromHex(signerAccount.signerPubKey), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet( + `(stx-account '${account.stxAddress})` + ); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + } + }); + + it("returns an error for an invalid start height", async () => { + const account = stackers[0]; + const burnBlockHeight = 2000; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_INVALID_START_BURN_HEIGHT) + ); + }); + + it("cannot be called indirectly by an unapproved caller", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + "indirect", + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly by an approved caller", async () => { + const account = stackers[0]; + const rewardCycle = 0; + const burnBlockHeight = 1; + const period = 10; + const authId = 1; + + allowContractCaller(`${deployer}.indirect`, null, address1); + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const response = simnet.callPublicFn( + "indirect", + "stack-stx", + stackStxArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + "lock-amount": Cl.uint(187500000000), + "signer-key": 
Cl.bufferFromHex(account.signerPubKey), + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(11550), + }) + ); + + const stxAccount = simnet.runSnippet( + `(stx-account '${account.stxAddress})` + ); + expect(stxAccount).toBeTuple({ + locked: Cl.uint(ustxAmount), + unlocked: Cl.uint(initialSTXBalance - ustxAmount), + "unlock-height": Cl.uint(11550), + }); + }); + + it("returns an error if the stacker is already stacked", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_STACKED) + ); + }); + + it("returns an error if the stacker is already delegated", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + delegateStx( + ustxAmount, + address2, + burnBlockHeight, + account.btcAddr, + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_ALREADY_DELEGATED) + ); + }); + + it("returns an error if the stacker has an insufficient balance", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = simnet.getAssetsMap().get("STX")?.get(address1)! + 10n; + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + ustxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS) + ); + }); + + it("returns an error if the signature is already used", async () => { + const account = stackers[0]; + const burnBlockHeight = 0; + const period = 10; + const authId = 1; + const ustxAmount = Math.floor(stackingThreshold * 1.5); + const rewardCycle = 0; + + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + + simnet.callPrivateFn( + POX_CONTRACT, + "consume-signer-key-authorization", + [ + poxAddressToTuple(account.btcAddr), + Cl.uint(rewardCycle), + Cl.stringAscii(Pox4SignatureTopic.StackStx), + Cl.uint(period), + Cl.some(Cl.bufferFromHex(signerSignature)), + Cl.bufferFromHex(account.signerPubKey), + Cl.uint(ustxAmount), + Cl.uint(maxAmount), + Cl.uint(authId), + ], + address1 + ); + + const response = stackStx( + account, + ustxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + address1 + ); + + expect(response.result).toBeErr(Cl.int(ERRORS.ERR_SIGNER_AUTH_USED)); + }); + }); + + describe("stack-extend", () => { + it("can extend stacking during the last stacking cycle", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: 
account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength); + + // advance to cycle 2 + simnet.mineEmptyBlocks(cycleLength); + // call stack-extend for 2 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(cycleLength * 5), + }) + ); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle3 = getTotalStacked(simnet, POX_CONTRACT, 3); + expect(totalCycle3).toBe(BigInt(ustxAmount)); + + // advance to cycle 4 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle4 = getTotalStacked(simnet, POX_CONTRACT, 4); + expect(totalCycle4).toBe(BigInt(ustxAmount)); + + // advance to cycle 5 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle5 = getTotalStacked(simnet, POX_CONTRACT, 5); + expect(totalCycle5).toBe(0n); + }); + + it("can extend stacking up to 11 cycles", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, address1); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength * 2); + + // call stack-extend for 11 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 11, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(11), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + 
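+        // the original 2-cycle lock would unlock at cycle 3; extending by 11
+        // pushes the unlock to cycle 14, hence cycleLength * (11 + 2 + 1)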
"unlock-burn-height": Cl.uint(cycleLength * (11 + 2 + 1)), + }) + ); + }); + + it("can not extend stacking for more than 11 cycles", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + simnet.callPublicFn(POX_CONTRACT, "stack-stx", stackStxArgs, address1); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength * 2); + + // call stack-extend for 12 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 2, + period: 12, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(12), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD)); + }); + + it("can extend stacking during any stacking cycle", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 1 + simnet.mineEmptyBlocks(cycleLength); + // call stack-extend for 2 more cycles + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 1, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(address1), + "unlock-burn-height": Cl.uint(cycleLength * 5), + }) + ); + + // advance to cycle 2 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle2 = 
getTotalStacked(simnet, POX_CONTRACT, 2); + expect(totalCycle2).toBe(BigInt(ustxAmount)); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle3 = getTotalStacked(simnet, POX_CONTRACT, 3); + expect(totalCycle3).toBe(BigInt(ustxAmount)); + + // advance to cycle 4 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle4 = getTotalStacked(simnet, POX_CONTRACT, 4); + expect(totalCycle4).toBe(BigInt(ustxAmount)); + + // advance to cycle 5 + simnet.mineEmptyBlocks(cycleLength); + const totalCycle5 = getTotalStacked(simnet, POX_CONTRACT, 5); + expect(totalCycle5).toBe(0n); + }); + + it("can not extend stacking after stacking end", () => { + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + const stackSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 0, + period: 2, + topic: Pox4SignatureTopic.StackStx, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + const ustxAmount = Math.floor(stackingThreshold * 1.5); + + const stackStxArgs = [ + Cl.uint(ustxAmount), + poxAddressToTuple(account.btcAddr), + Cl.uint(burnBlockHeight), + Cl.uint(2), + Cl.some(Cl.bufferFromHex(stackSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + POX_CONTRACT, + "stack-stx", + stackStxArgs, + address1 + ); + expect(response.result).toHaveClarityType(ClarityType.ResponseOk); + + // advance to cycle 3 + simnet.mineEmptyBlocks(cycleLength * 3); + + const extendSignature = account.client.signPoxSignature({ + authId, + maxAmount, + rewardCycle: 3, + period: 2, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }); + const extendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(extendSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + extendArgs, + address1 + ); + expect(result).toBeErr(Cl.int(26)); + }); + + it("cannot be called indirectly from an unauthorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "stack-extend", + stackExtendArgs, + address1 + ); + + expect(response.result).toBeErr( + Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED) + ); + }); + + it("can be called indirectly from an authorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + const poxInfo = getPoxInfo(); + const 
cycleLength = Number(poxInfo.rewardCycleLength); + + allowContractCaller(`${deployer}.indirect`, null, address1); + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(2), + poxAddressToTuple(account.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + const response = simnet.callPublicFn( + "indirect", + "stack-extend", + stackExtendArgs, + address1 + ); + + expect(response.result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(5 * cycleLength), + }) + ); + }); + + it("cannot extend for 0 cycles", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + const { result } = stackExtend( + account, + 0, + maxAmount, + authId, + account.stxAddress + ); + + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD)); + }); + + it("errors if not directly stacking", () => { + const account = stackers[0]; + const delegateAccount = stackers[1]; + const authId = account.authId; + const period = 6; + + delegateStx( + maxAmount, + delegateAccount.stxAddress, + null, + null, + account.stxAddress + ); + delegateStackStx( + address1, + maxAmount, + delegateAccount.btcAddr, + 1000, + period, + address2 + ); + + const { result } = stackExtend( + account, + 4, + maxAmount, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_IS_DELEGATED)); + }); + + it("can change the pox address", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 6; + const extendPeriod = 5; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period: extendPeriod, + topic: Pox4SignatureTopic.StackExtend, + poxAddress: account1.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackExtendArgs = [ + Cl.uint(extendPeriod), + poxAddressToTuple(account1.btcAddr), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-extend", + stackExtendArgs, + address1 + ); + + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "unlock-burn-height": Cl.uint(12 * cycleLength), + }) + ); + }); + }); + + describe("stack-increase", () => { + it("can increase stacked amount before locked", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = 
account.authId; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("can increase stacked amount after locked", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const poxInfo = getPoxInfo(); + const cycleLength = Number(poxInfo.rewardCycleLength); + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + simnet.mineEmptyBlocks(cycleLength); + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("cannot increase when not stacked", () => { + const account = stackers[0]; + const authId = account.authId; + + const { result } = stackIncrease( + account, + maxAmount, + 2, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACK_INCREASE_NOT_LOCKED)); + }); + + it("errors if increase-by amount is 0", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + 0, + 2, + maxAmount, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INVALID_AMOUNT)); + }); + + it("can stack the entire balance", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const balance = simnet + .getAssetsMap() + .get("STX") + ?.get(account.stxAddress)!; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + balance - BigInt(maxAmount), + 2, + 2n ** 128n - 1n, + authId, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(balance), + }) + ); + }); + + it("errors on insufficient funds", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const balance = simnet + .getAssetsMap() + .get("STX") + ?.get(account.stxAddress)!; + + stackStx( + account, + maxAmount, + burnBlockHeight, + 2, + maxAmount, + authId, + account.stxAddress + ); + + const { result } = stackIncrease( + account, + balance - BigInt(maxAmount) + 1n, + 2, + 2n ** 128n - 1n, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS)); + }); + + it("cannot be called indirectly from an unauthorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const 
signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + "indirect", + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_PERMISSION_DENIED)); + }); + + it("can be called indirectly from an authorized caller", () => { + const account = stackers[0]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 2; + + allowContractCaller(`${deployer}.indirect`, null, account.stxAddress); + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount: maxAmount * 2, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + "indirect", + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeOk( + Cl.tuple({ + stacker: Cl.principal(account.stxAddress), + "total-locked": Cl.uint(maxAmount * 2), + }) + ); + }); + + it("errors if not directly stacking", () => { + const account = stackers[0]; + const delegateAccount = stackers[1]; + const authId = account.authId; + const period = 6; + + delegateStx( + maxAmount * 2, + delegateAccount.stxAddress, + null, + null, + account.stxAddress + ); + delegateStackStx( + address1, + maxAmount, + delegateAccount.btcAddr, + 1000, + period, + address2 + ); + + const { result } = stackIncrease( + account, + maxAmount, + period, + maxAmount * 2, + authId, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_STACKING_IS_DELEGATED)); + }); + + it("cannot change the pox address", () => { + const account = stackers[0]; + const account1 = stackers[1]; + const burnBlockHeight = 1; + const authId = account.authId; + const period = 6; + + stackStx( + account, + maxAmount, + burnBlockHeight, + period, + maxAmount, + authId, + account.stxAddress + ); + + const rewardCycle = burnHeightToRewardCycle(simnet.blockHeight); + const sigArgs = { + authId, + maxAmount: maxAmount * 2, + rewardCycle, + period, + topic: Pox4SignatureTopic.StackIncrease, + poxAddress: account1.btcAddr, + signerPrivateKey: account.signerPrivKey, + }; + const signerSignature = account.client.signPoxSignature(sigArgs); + const signerKey = Cl.bufferFromHex(account.signerPubKey); + + const stackIncreaseArgs = [ + Cl.uint(maxAmount), + Cl.some(Cl.bufferFromHex(signerSignature)), + signerKey, + Cl.uint(maxAmount * 2), + Cl.uint(authId), + ]; + + const { result } = simnet.callPublicFn( + POX_CONTRACT, + "stack-increase", + stackIncreaseArgs, + account.stxAddress + ); + expect(result).toBeErr(Cl.int(ERRORS.ERR_INVALID_SIGNATURE_PUBKEY)); + }); + }); +}); diff --git a/contrib/boot-contracts-unit-tests/tsconfig.json b/contrib/boot-contracts-unit-tests/tsconfig.json new file mode 100644 index 00000000000..1bdaf36c465 --- /dev/null +++ 
b/contrib/boot-contracts-unit-tests/tsconfig.json @@ -0,0 +1,26 @@ + +{ + "compilerOptions": { + "target": "ESNext", + "useDefineForClassFields": true, + "module": "ESNext", + "lib": ["ESNext"], + "skipLibCheck": true, + + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + + "strict": true, + "noImplicitAny": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": [ + "node_modules/@hirosystems/clarinet-sdk/vitest-helpers/src", + "tests" + ] +} diff --git a/contrib/boot-contracts-unit-tests/vitest.config.js b/contrib/boot-contracts-unit-tests/vitest.config.js new file mode 100644 index 00000000000..e7945ebe026 --- /dev/null +++ b/contrib/boot-contracts-unit-tests/vitest.config.js @@ -0,0 +1,44 @@ +/// + +import { defineConfig } from "vite"; +import { + vitestSetupFilePath, + getClarinetVitestsArgv, +} from "@hirosystems/clarinet-sdk/vitest"; + +/* + In this file, Vitest is configured so that it works seamlessly with Clarinet and the Simnet. + + The `vitest-environment-clarinet` will initialise the clarinet-sdk + and make the `simnet` object available globally in the test files. + + `vitestSetupFilePath` points to a file in the `@hirosystems/clarinet-sdk` package that does two things: + - run `before` hooks to initialize the simnet and `after` hooks to collect costs and coverage reports. + - load custom vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`) + + The `getClarinetVitestsArgv()` will parse options passed to the command `vitest run --` + - vitest run -- --manifest ./Clarinet.toml # pass a custom path + - vitest run -- --coverage --costs # collect coverage and cost reports +*/ + +export default defineConfig({ + test: { + environment: "clarinet", // use vitest-environment-clarinet + pool: "forks", + poolOptions: { + forks: { singleFork: true }, + }, + setupFiles: [ + vitestSetupFilePath, + // custom setup files can be added here + ], + environmentOptions: { + clarinet: { + ...getClarinetVitestsArgv(), + includeBootContracts: true, + bootContractsPath: `${process.cwd()}/boot_contracts`, + // add or override options + }, + }, + }, +}); diff --git a/contrib/core-contract-tests/package-lock.json b/contrib/core-contract-tests/package-lock.json index 50ded82d383..f15153b12e8 100644 --- a/contrib/core-contract-tests/package-lock.json +++ b/contrib/core-contract-tests/package-lock.json @@ -11,7 +11,8 @@ "dependencies": { "@hirosystems/clarinet-sdk": "^2.4.1", "@stacks/clarunit": "0.0.1", - "@stacks/transactions": "^6.12.0", + "@stacks/stacking": "^6.13.2", + "@stacks/transactions": "^6.13.0", "chokidar-cli": "^3.0.0", "fast-check": "^3.15.1", "typescript": "^5.4.2", @@ -448,9 +449,9 @@ } }, "node_modules/@hirosystems/clarinet-sdk-wasm": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.4.0.tgz", - "integrity": "sha512-qApXWsnWRtQcj5BsqoKd+AsEtDURA5CJQcRxgCAVjyRSjkbGJXxNgrW9oRnIkfIIKJ6D5mV7JGrr8CQ8BSJ/tg==" + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.4.2.tgz", + "integrity": "sha512-85RrDiqrfup/h7XLqysdm/J4csmimCRTXHnCiD+4HyKHVhgr7HWL7sGEGpGfThjPxukjV8A+b2GF2x9Rufpz9g==" }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.14", @@ -1219,23 +1220,23 @@ } }, "node_modules/@stacks/common": { - "version": "6.10.0", - "resolved": 
"https://registry.npmjs.org/@stacks/common/-/common-6.10.0.tgz", - "integrity": "sha512-6x5Z7AKd9/kj3+DYE9xIDIkFLHihBH614i2wqrZIjN02WxVo063hWSjIlUxlx8P4gl6olVzlOy5LzhLJD9OP0A==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.13.0.tgz", + "integrity": "sha512-wwzyihjaSdmL6NxKvDeayy3dqM0L0Q2sawmdNtzJDi0FnXuJGm5PeapJj7bEfcI9XwI7Bw5jZoC6mCn9nc5YIw==", "dependencies": { "@types/bn.js": "^5.1.0", "@types/node": "^18.0.4" } }, "node_modules/@stacks/encryption": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.12.0.tgz", - "integrity": "sha512-CubE51pHrcxx3yA+xapevPgA9UDleIoEaUZ06/9uD91B42yvTg37HyS8t06rzukU9q+X7Cv2I/+vbuf4nJIo8g==", + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.13.1.tgz", + "integrity": "sha512-y5IFX3/nGI3fCk70gE0JwH70GpshD8RhUfvhMLcL96oNaec1cCdj1ZUiQupeicfYTHuraaVBYU9xLls4TRmypg==", "dependencies": { "@noble/hashes": "1.1.5", "@noble/secp256k1": "1.7.1", "@scure/bip39": "1.1.0", - "@stacks/common": "^6.10.0", + "@stacks/common": "^6.13.0", "@types/node": "^18.0.4", "base64-js": "^1.5.1", "bs58": "^5.0.0", @@ -1244,25 +1245,26 @@ } }, "node_modules/@stacks/network": { - "version": "6.11.3", - "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.11.3.tgz", - "integrity": "sha512-c4ClCU/QUwuu8NbHtDKPJNa0M5YxauLN3vYaR0+S4awbhVIKFQSxirm9Q9ckV1WBh7FtD6u2S0x+tDQGAODjNg==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.13.0.tgz", + "integrity": "sha512-Ss/Da4BNyPBBj1OieM981fJ7SkevKqLPkzoI1+Yo7cYR2df+0FipIN++Z4RfpJpc8ne60vgcx7nJZXQsiGhKBQ==", "dependencies": { - "@stacks/common": "^6.10.0", + "@stacks/common": "^6.13.0", "cross-fetch": "^3.1.5" } }, "node_modules/@stacks/stacking": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.12.0.tgz", - "integrity": "sha512-XBxwbaCGRPnjpjspb3CBXrlZl6xR+gghLMz9PQNPdpuIbBDFa0SGeHgqjtpVU+2DVL4UyBx8PVsAWtlssyVGng==", + "version": "6.13.2", + "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.13.2.tgz", + "integrity": "sha512-4h1UQuL2+Xdra9zMqzUElvKG9X9fenuNE7hD9sIqyxyLFxeQ7gRqczmTYPsmaj4wY5004JNj+efzGJ0VmpOcAA==", "dependencies": { + "@noble/hashes": "1.1.5", "@scure/base": "1.1.1", - "@stacks/common": "^6.10.0", - "@stacks/encryption": "^6.12.0", - "@stacks/network": "^6.11.3", + "@stacks/common": "^6.13.0", + "@stacks/encryption": "^6.13.1", + "@stacks/network": "^6.13.0", "@stacks/stacks-blockchain-api-types": "^0.61.0", - "@stacks/transactions": "^6.12.0", + "@stacks/transactions": "^6.13.1", "bs58": "^5.0.0" } }, @@ -1283,14 +1285,14 @@ "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" }, "node_modules/@stacks/transactions": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.12.0.tgz", - "integrity": "sha512-gRP3SfTaAIoTdjMvOiLrMZb/senqB8JQlT5Y4C3/CiHhiprYwTx7TbOCSa7WsNOU99H4aNfHvatmymuggXQVkA==", + "version": "6.13.1", + "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.13.1.tgz", + "integrity": "sha512-PWw2I+2Fj3CaFYQIoVcqQN6E2qGHNhFv03nuR0CxMq0sx8stPgYZbdzUlnlBcJQdsFiHrw3sPeqnXDZt+Hg5YQ==", "dependencies": { "@noble/hashes": "1.1.5", "@noble/secp256k1": "1.7.1", - "@stacks/common": "^6.10.0", - "@stacks/network": "^6.11.3", + "@stacks/common": "^6.13.0", + "@stacks/network": "^6.13.0", "c32check": "^2.0.0", "lodash.clonedeep": 
"^4.5.0" } diff --git a/contrib/core-contract-tests/package.json b/contrib/core-contract-tests/package.json index 7ba3ba62e21..fe3dee2eb58 100644 --- a/contrib/core-contract-tests/package.json +++ b/contrib/core-contract-tests/package.json @@ -13,7 +13,8 @@ "dependencies": { "@hirosystems/clarinet-sdk": "^2.4.1", "@stacks/clarunit": "0.0.1", - "@stacks/transactions": "^6.12.0", + "@stacks/stacking": "^6.13.2", + "@stacks/transactions": "^6.13.0", "chokidar-cli": "^3.0.0", "fast-check": "^3.15.1", "typescript": "^5.4.2", diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh new file mode 100755 index 00000000000..11da6810e54 --- /dev/null +++ b/contrib/tools/local-mutation-testing.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -euo pipefail + +# Install cargo-mutants +cargo install --version 24.7.1 cargo-mutants --locked + +# Create diff file between current branch and develop branch +git diff origin/develop...HEAD > git.diff + +# Remove git diff files about removed/renamed files +awk ' + /^diff --git/ { + diff_line = $0 + getline + if ($0 !~ /^(deleted file mode|similarity index)/) { + print diff_line + print + } + } + !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} +' git.diff > processed.diff + +# Extract mutants based on the processed diff +cargo mutants --in-diff processed.diff --list > all_mutants.txt + +# Create a directory for organizing mutants +mkdir -p mutants_by_package + +# Organize mutants into files based on their main folder +while IFS= read -r line; do + package=$(echo "$line" | cut -d'/' -f1) + + case $package in + "stackslib") + echo "$line" >> "mutants_by_package/stackslib.txt" + ;; + "testnet") + echo "$line" >> "mutants_by_package/stacks-node.txt" + ;; + "stacks-signer") + echo "$line" >> "mutants_by_package/stacks-signer.txt" + ;; + *) + echo "$line" >> "mutants_by_package/small-packages.txt" + ;; + esac +done < all_mutants.txt + +# Function to run mutants for a package +run_mutants() { + local package=$1 + local threshold=$2 + local output_dir=$3 + local mutant_file="mutants_by_package/${package}.txt" + + if [ ! -f "$mutant_file" ]; then + echo "No mutants found for $package" + return 0 + fi + + local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) + local mutant_count=$(cargo mutants -F "$regex_pattern" -E ": replace .{1,2} with .{1,2} in " --list | wc -l) + + if [ "$mutant_count" -gt "$threshold" ]; then + echo "Running mutants for $package ($mutant_count mutants)" + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "$output_dir" \ + --test-tool=nextest \ + --package "$package" \ + -- --all-targets --test-threads 1 || true + + echo $? 
> "${output_dir}/exit_code.txt" + else + echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" + fi + + return 0 +} + +# Run mutants for each wanted package +run_mutants "stacks-signer" 500 "./stacks-signer_mutants" || true +run_mutants "stacks-node" 540 "./stacks-node_mutants" || true +run_mutants "stackslib" 72 "./stackslib_mutants" || true diff --git a/docs/ci-release.md b/docs/ci-release.md index 4e21ed631df..f7881ba675e 100644 --- a/docs/ci-release.md +++ b/docs/ci-release.md @@ -23,7 +23,7 @@ All releases are built via a Github Actions workflow named `CI` ([ci.yml](../.gi - `stacks-core:` - An untagged build of any branch will produce a single image built from source on Debian with glibc: - `stacks-core:` -- A tagged release on a non-default branch will produces: +- A tagged release on a non-default branch will produce: - Docker Alpine image for several architectures tagged with: - `stacks-core:` - Docker Debian image for several architectures tagged with: @@ -83,7 +83,7 @@ There are also 2 different methods in use with regard to running tests: A matrix is used when there are several known tests that need to be run. Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). There is also a workflow designed to run tests that are manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). -This workflow requires you to select which test(s) you want to run, which then triggers a reusbale workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. +This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. Files: diff --git a/docs/mining.md b/docs/mining.md index 8b824924f77..e113f12d933 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -3,7 +3,7 @@ Stacks tokens (STX) are mined by transferring BTC via PoX. To run as a miner, you should make sure to add the following config fields to your config file: -``` +```toml [node] # Run as a miner miner = True @@ -25,6 +25,8 @@ first_attempt_time_ms = 1000 subsequent_attempt_time_ms = 60000 # Time to spend mining a microblock, in milliseconds. microblock_attempt_time_ms = 30000 +# Time to spend mining a Nakamoto block, in milliseconds. +nakamoto_attempt_time_ms = 20000 ``` You can verify that your node is operating as a miner by checking its log output @@ -40,7 +42,7 @@ INFO [1630127492.062652] [testnet/stacks-node/src/run_loop/neon.rs:164] [main] U Fee and cost estimators can be configured via the config section `[fee_estimation]`: -``` +```toml [fee_estimation] cost_estimator = naive_pessimistic fee_estimator = fuzzed_weighted_median_fee_rate diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md new file mode 100644 index 00000000000..85fcd89a7f6 --- /dev/null +++ b/docs/mutation-testing.md @@ -0,0 +1,146 @@ +# Mutation Testing + +This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. 
+[Here is the script](../contrib/tools/local-mutation-testing.sh) that runs mutation testing locally on the mutants created by the changes between `HEAD` and `develop`.
+It automatically performs all of the steps explained below.
+
+From the root level of the stacks-core repository, run
+```sh
+./contrib/tools/local-mutation-testing.sh
+```
+
+## Prerequisites
+
+Install the cargo-mutants crate
+```sh
+cargo install --version 24.7.1 cargo-mutants --locked
+```
+
+## Steps
+1. Check out the source branch you would use for the PR.
+2. Create a diff file comparing this branch with the `develop` branch
+   ```sh
+   git diff origin/develop...HEAD > git.diff
+   ```
+3. Clean up the diff file and create auxiliary files
+   ```sh
+   awk '
+       /^diff --git/ {
+           diff_line = $0
+           getline
+           if ($0 !~ /^(deleted file mode|similarity index)/) {
+               print diff_line
+               print
+           }
+       }
+       !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print}
+   ' git.diff > processed.diff
+
+   # Extract mutants based on the processed diff
+   cargo mutants --in-diff processed.diff --list > all_mutants.txt
+
+   # Create a directory for organizing mutants
+   mkdir -p mutants_by_package
+
+   # Organize mutants into files based on their main folder
+   while IFS= read -r line; do
+       package=$(echo "$line" | cut -d'/' -f1)
+
+       case $package in
+           "stackslib")
+               echo "$line" >> "mutants_by_package/stackslib.txt"
+               ;;
+           "testnet")
+               echo "$line" >> "mutants_by_package/stacks-node.txt"
+               ;;
+           "stacks-signer")
+               echo "$line" >> "mutants_by_package/stacks-signer.txt"
+               ;;
+           *)
+               echo "$line" >> "mutants_by_package/small-packages.txt"
+               ;;
+       esac
+   done < all_mutants.txt
+   ```
+4. Run the mutants for the package you need (a quick way to check the results follows this list):
+   a. Stackslib package
+      ```sh
+      regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -)
+
+      RUST_BACKTRACE=1 BITCOIND_TEST=1 \
+      cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \
+          -F "$regex_pattern" \
+          -E ": replace .{1,2} with .{1,2} in " \
+          --output "./stackslib_mutants" \
+          --test-tool=nextest \
+          -- --all-targets --test-threads 1
+      ```
+   b. Stacks-node (testnet) package
+      ```sh
+      regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-node.txt" | paste -sd'|' -)
+
+      RUST_BACKTRACE=1 BITCOIND_TEST=1 \
+      cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \
+          -F "$regex_pattern" \
+          -E ": replace .{1,2} with .{1,2} in " \
+          --output "./testnet_mutants" \
+          --test-tool=nextest \
+          -- --all-targets --test-threads 1
+      ```
+   c. Stacks-signer
+      ```sh
+      regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -)
+
+      RUST_BACKTRACE=1 BITCOIND_TEST=1 \
+      cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \
+          -F "$regex_pattern" \
+          -E ": replace .{1,2} with .{1,2} in " \
+          --output "./stacks-signer_mutants" \
+          --test-tool=nextest \
+          -- --all-targets --test-threads 1
+      ```
+   d. All other packages combined
+      ```sh
+      regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -)
+
+      cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \
+          -F "$regex_pattern" \
+          -E ": replace .{1,2} with .{1,2} in " \
+          --output "./small-packages_mutants" \
+          --test-tool=nextest \
+          -- --all-targets --test-threads 1
+      ```
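+
+After a run finishes, its results are written beneath the chosen `--output` directory.
+As a quick check (a sketch, assuming the standard `mutants.out` layout that cargo-mutants
+creates under the `--output` directory), the outcomes can be counted per category:
+```sh
+# Summarize one package's results; adjust the path to the --output value used above.
+for f in caught.txt missed.txt timeout.txt unviable.txt; do
+    echo "$f: $(wc -l < "./stackslib_mutants/mutants.out/$f")"
+done
+```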
+
+## How to run one specific mutant to test it
+
+Example output for a missed mutant
+```sh
+MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in 3.0s build + 9.3s test
+```
+
+Example command to run that specific mutant
+```sh
+RUST_BACKTRACE=1 BITCOIND_TEST=1 \
+cargo mutants -vV \
+    -F "replace process_stackerdb_event" \
+    -E ": replace >::run_one_pass -> Option> with None in " \
+    --test-tool=nextest \
+    -- \
+    --run-ignored all \
+    --fail-fast \
+    --test-threads 1
+```
+
+General command template
+```sh
+RUST_BACKTRACE=1 BITCOIND_TEST=1 \
+cargo mutants -vV \
+    -F "replace process_stackerdb_event" \
+    -E ": replace [modify this] with [modify this] in " \
+    --test-tool=nextest \
+    -- \
+    --run-ignored all \
+    --fail-fast \
+    --test-threads 1
+```
diff --git a/docs/profiling.md b/docs/profiling.md
index 25f821d2c91..832b3d44572 100644
--- a/docs/profiling.md
+++ b/docs/profiling.md
@@ -224,7 +224,7 @@ $ sudo sed -i "$ a kernel.kptr_restrict = 0" /etc/sysctl.conf
 $ sysctl --system
 ```
 
-Note that you need to uncomment the following in `.cargo/config` (see [flamegraph-rs](https://github.com/flamegraph-rs/flamegraph) for details)
+Note that you need to uncomment the following in `.cargo/config.toml` (see [flamegraph-rs](https://github.com/flamegraph-rs/flamegraph) for details)
 
 ```
 [target.x86_64-unknown-linux-gnu]
diff --git a/docs/release-process.md b/docs/release-process.md
index 1e833caf66b..5e2be08b5d1 100644
--- a/docs/release-process.md
+++ b/docs/release-process.md
@@ -11,13 +11,12 @@
 | Linux ARMv7 | _builds are provided but not tested_ |
 | Linux ARM64 | _builds are provided but not tested_ |
 
-For help cross-compiling on memory-constrained devices (such as a Raspberry Pi), please see the community supported documentation here: [Cross Compiling](https://github.com/dantrevino/cross-compiling-stacks-blockchain/blob/master/README.md).
 
 ## Release Schedule and Hotfixes
 
 Normal releases in this repository that add features such as improved RPC endpoints, improved boot-up time, new event observer fields or event types, etc., are released on a monthly schedule. The currently staged changes for such releases
-are in the [develop branch](https://github.com/stacks-network/stacks-blockchain/tree/develop). It is generally safe to run
+are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run
 a `stacks-node` from that branch, though it has received less rigorous testing than release tags. If bugs are found in the `develop` branch, please do report them as issues on this repository.
 
@@ -52,13 +51,14 @@ For non-consensus breaking releases, this project uses the following release pro
 
 1. The release must be timed so that it does not interfere with a _prepare
    phase_. The timing of the next Stacking cycle can be found
-   [here](https://stacking.club/cycles/next). A release to `mainnet` should happen
+   [here](https://stx.eco/dao/tools?tool=2). A release should happen
    at least 24 hours before the start of a new cycle, to avoid interfering
    with the prepare phase. So, start by being aware of when the release can
    happen.
 
 1.
Before creating the release, the release manager must determine the _version - number_ for this release. The factors that determine the version number are + number_ for this release, and create a release branch in the format: `release/X.Y.Z.A.n`. + The factors that determine the version number are discussed in [Versioning](#versioning). We assume, in this section, that the change is not consensus-breaking. So, the release manager must first determine whether there are any "non-consensus-breaking changes that require a @@ -66,32 +66,24 @@ For non-consensus breaking releases, this project uses the following release pro changed, but an automatic migration was not implemented. Then, the release manager should determine whether this is a feature release, as opposed to a hotfix or a patch. Given the answers to these questions, the version number can be computed. - + 1. The release manager enumerates the PRs or issues that would _block_ the release. A label should be applied to each such issue/PR as - `2.0.x.y.z-blocker`. The release manager should ping these + `X.Y.Z.A.n-blocker`. The release manager should ping these issue/PR owners for updates on whether or not those issues/PRs have any blockers or are waiting on feedback. -1. The release manager should open a `develop -> master` PR. This can be done before - all the blocker PRs have merged, as it is helpful for the manager and others - to see the staged changes. - 1. The release manager must update the `CHANGELOG.md` file with summaries what was `Added`, `Changed`, and `Fixed`. The pull requests merged into `develop` can be found - [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by + [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should - be used to understand which PR's were _merged_ after the last `develop -> -master` release PR. This `CHANGELOG.md` should also be used as the description - of the `develop -> master` so that it acts as _release notes_ when the branch - is tagged. + be used to understand which PR's were _merged_ after the last release. 1. Once the blocker PRs have merged, the release manager will create a new tag - by manually triggering the [`stacks-blockchain` Github Actions workflow](https://github.com/stacks-network/stacks-blockchain/actions/workflows/stacks-blockchain.yml) - against the `develop` branch, inputting the release candidate tag, `2.0.x.y.z-rc0`, - in the Action's input textbox. - + by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) + against the `release/X.Y.Z.A.n` branch. + 1. Once the release candidate has been built, and docker images, etc. are available, the release manager will notify various ecosystem participants to test the release candidate on various staging infrastructure: @@ -104,7 +96,7 @@ master` release PR. This `CHANGELOG.md` should also be used as the description Stacks Discord. For coordinating rollouts on specific infrastructure, the release manager should contact the above participants directly either through e-mail or Discord DM. 
   The release manager should also confirm that the built release on the
-   [Github releases](https://github.com/stacks-network/stacks-blockchain/releases/)
+   [Github releases](https://github.com/stacks-network/stacks-core/releases/)
    page is marked as `Pre-Release`.
 
 1. The release manager will test that the release candidate successfully syncs with
@@ -119,16 +111,9 @@ master` release PR. This `CHANGELOG.md` should also be used as the description
    even if other community members and developers may be addressing the
    discovered issues.
 
-1. Once the final release candidate has rolled out successfully without issue on the
-   above staging infrastructure, the release manager tags 2 additional `stacks-blockchain`
-   team members to review the `develop -> master` PR. If there is a merge conflict in this
-   PR, this is the protocol: open a branch off of develop, merge master into that branch,
-   and then open a PR from this side branch to develop. The merge conflicts will be
-   resolved.
-
-1. Once reviewed and approved, the release manager merges the PR, and tags the release
-   via the [`stacks-blockchain` Github action](https://github.com/stacks-network/stacks-blockchain/actions/workflows/stacks-blockchain.yml)
-   by clicking "Run workflow" and providing the release version as the tag (e.g.,
-   `2.0.11.1.0`) This creates a release and release images. Once the release has been
-   created, the release manager should update the Github release text with the
-   `CHANGELOG.md` "top-matter" for the release.
+1. Once the final release candidate has rolled out successfully without issue on staging
+   infrastructure, the tagged release shall no longer be marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page.
+   Announcements will then be shared in the `#stacks-core-devs` channel in the
+   Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce).
+
+1. Finally, the release branch `release/X.Y.Z.A.n` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened.
diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md
index 6815adfc61c..6163f27b753 100644
--- a/docs/rpc-endpoints.md
+++ b/docs/rpc-endpoints.md
@@ -111,7 +111,7 @@ Returns a JSON list containing the following:
 ```
 
 The `consensus_hash` field identifies the sortition in which the given block was
-chosen. The `header` is the raw block header, a a hex string. The
+chosen. The `header` is the raw block header, a hex string. The
 `parent_block_id` is the block ID hash of this block's parent, and can be used
 as a `?tip=` query parameter to page through deeper and deeper block headers.
 
@@ -143,8 +143,8 @@ Returns JSON data in the form:
 }
 ```
 
-Where balance is the hex encoding of a unsigned 128-bit integer
-(big-endian), nonce is a unsigned 64-bit integer, and the proofs are
+Where balance is the hex encoding of an unsigned 128-bit integer
+(big-endian), nonce is an unsigned 64-bit integer, and the proofs are
 provided as hex strings.
 
 For non-existent accounts, this _does not_ 404, rather it returns an
@@ -212,7 +212,7 @@ JSON object _without_ the `proof` field.
 
 ### GET /v2/fees/transfer
 
-Get an estimated fee rate for STX transfer transactions. This a a fee rate / byte, and is returned as a JSON integer.
+Get an estimated fee rate for STX transfer transactions. This is a fee rate / byte, and is returned as a JSON integer.
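+
+For example (a sketch, assuming a local node serving the RPC interface on the default port 20443):
+
+```sh
+# Prints a single JSON integer, e.g. 1
+curl -s http://localhost:20443/v2/fees/transfer
+```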
### GET /v2/contracts/interface/[Stacks Address]/[Contract Name] @@ -530,6 +530,6 @@ Return metadata about the highest-known tenure, as the following JSON structure: Here, `consensus_hash` identifies the highest-known tenure (which may not be the highest sortition), `reward_cycle` identifies the reward cycle number of this -tenure, `tip_block_id` idenitifies the highest-known block in this tenure, and +tenure, `tip_block_id` identifies the highest-known block in this tenure, and `tip_height` identifies that block's height. diff --git a/docs/rpc/api/core-node/get-info.example.json b/docs/rpc/api/core-node/get-info.example.json index 19bb6d20f4c..afc42e6f686 100644 --- a/docs/rpc/api/core-node/get-info.example.json +++ b/docs/rpc/api/core-node/get-info.example.json @@ -11,5 +11,6 @@ "stacks_tip": "b1807a2d3f7f8c7922f7c1d60d7c34145ade05d789640dc7dc9ec1021e07bb54", "stacks_tip_consensus_hash": "17f76e597bab45646956f38dd39573085d72cbc0", "unanchored_tip": "0000000000000000000000000000000000000000000000000000000000000000", - "exit_at_block_height": null + "exit_at_block_height": null, + "is_fully_synced": false } diff --git a/docs/rpc/api/core-node/get-info.schema.json b/docs/rpc/api/core-node/get-info.schema.json index f37cd0893fe..16b560ed5ef 100644 --- a/docs/rpc/api/core-node/get-info.schema.json +++ b/docs/rpc/api/core-node/get-info.schema.json @@ -17,7 +17,8 @@ "stacks_tip", "stacks_tip_consensus_hash", "unanchored_tip", - "exit_at_block_height" + "exit_at_block_height", + "is_fully_synced" ], "properties": { "peer_version": { @@ -71,6 +72,10 @@ "exit_at_block_height": { "type": "integer", "description": "the block height at which the testnet network will be reset. not applicable for mainnet" + }, + "is_fully_synced": { + "type": "boolean", + "description": "indicates whether the node has fully synchronized with the network" } } } diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index e04dcbbdc1a..7da9801674a 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -18,8 +18,10 @@ path = "./src/libsigner.rs" [dependencies] clarity = { path = "../clarity" } hashbrown = { workspace = true } +lazy_static = "1.4.0" libc = "0.2" libstackerdb = { path = "../libstackerdb" } +prometheus = { version = "0.9", optional = true } serde = "1" serde_derive = "1" serde_stacker = "0.1" @@ -33,6 +35,7 @@ tiny_http = "0.12" wsts = { workspace = true } [dev-dependencies] +mutants = "0.0.3" rand_core = { workspace = true } rand = { workspace = true } @@ -49,3 +52,6 @@ sha2 = { version = "0.10", features = ["asm"] } [target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os = "windows")))'.dependencies] sha2 = { version = "0.10" } + +[features] +monitoring_prom = ["prometheus"] \ No newline at end of file diff --git a/libsigner/src/error.rs b/libsigner/src/error.rs index 101a1b35e92..7c4deadf1b1 100644 --- a/libsigner/src/error.rs +++ b/libsigner/src/error.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -71,4 +71,7 @@ pub enum EventError { /// Unrecognized stacker DB contract error #[error("Unrecognized StackerDB contract: {0}")] UnrecognizedStackerDBContract(QualifiedContractIdentifier), + /// Empty chunks event + #[error("Empty chunks 
event")] + EmptyChunksEvent, } diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 2d156559ff2..6dbc10110a8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,11 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Debug; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; +use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; @@ -38,8 +40,11 @@ use stacks_common::codec::{ StacksMessageCodec, }; pub use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::types::chainstate::StacksPublicKey; -use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksPublicKey, +}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; @@ -51,11 +56,19 @@ use wsts::net::{ use wsts::state_machine::signer; use crate::http::{decode_http_body, decode_http_request}; -use crate::{EventError, SignerMessage}; +use crate::EventError; + +/// Define the trait for the event processor +pub trait SignerEventTrait: + StacksMessageCodec + Clone + Debug + Send +{ +} + +impl SignerEventTrait for T {} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] /// BlockProposal sent to signers -pub struct BlockProposalSigners { +pub struct BlockProposal { /// The block itself pub block: NakamotoBlock, /// The burn height the block is mined during @@ -64,30 +77,7 @@ pub struct BlockProposalSigners { pub reward_cycle: u64, } -/// Event enum for newly-arrived signer subscribed events -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum SignerEvent { - /// A miner sent a message over .miners - /// The `Vec` will contain any block proposals made by the miner during this StackerDB event. - /// The `Vec` will contain any signer WSTS messages made by the miner while acting as a coordinator. - /// The `Option` will contain the message sender's public key if either of the vecs is non-empty. 
- MinerMessages( - Vec, - Vec, - Option, - ), - /// The signer messages for other signers and miners to observe - /// The u32 is the signer set to which the message belongs (either 0 or 1) - SignerMessages(u32, Vec), - /// A new block proposal validation response from the node - BlockValidationResponse(BlockValidateResponse), - /// Status endpoint request - StatusCheck, - /// A new burn block event was received with the given burnchain block height - NewBurnBlock(u64), -} - -impl StacksMessageCodec for BlockProposalSigners { +impl StacksMessageCodec for BlockProposal { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.block.consensus_serialize(fd)?; self.burn_height.consensus_serialize(fd)?; @@ -99,7 +89,7 @@ impl StacksMessageCodec for BlockProposalSigners { let block = NakamotoBlock::consensus_deserialize(fd)?; let burn_height = u64::consensus_deserialize(fd)?; let reward_cycle = u64::consensus_deserialize(fd)?; - Ok(BlockProposalSigners { + Ok(BlockProposal { block, burn_height, reward_cycle, @@ -107,6 +97,31 @@ impl StacksMessageCodec for BlockProposalSigners { } } +/// Event enum for newly-arrived signer subscribed events +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum SignerEvent { + /// A miner sent a message over .miners + /// The `Vec` will contain any signer messages made by the miner. + /// The `StacksPublicKey` is the message sender's public key. + MinerMessages(Vec, StacksPublicKey), + /// The signer messages for other signers and miners to observe + /// The u32 is the signer set to which the message belongs (either 0 or 1) + SignerMessages(u32, Vec), + /// A new block proposal validation response from the node + BlockValidationResponse(BlockValidateResponse), + /// Status endpoint request + StatusCheck, + /// A new burn block event was received with the given burnchain block height + NewBurnBlock { + /// the burn height for the newly processed burn block + burn_height: u64, + /// the burn hash for the newly processed burn block + burn_header_hash: BurnchainHeaderHash, + /// the time at which this event was received by the signer's event processor + received_time: SystemTime, + }, +} + /// Trait to implement a stop-signaler for the event receiver thread. /// The caller calls `send()` and the event receiver loop (which lives in a separate thread) will /// terminate. @@ -116,7 +131,7 @@ pub trait EventStopSignaler { } /// Trait to implement to handle signer specific events sent by the Stacks node -pub trait EventReceiver { +pub trait EventReceiver { /// The implementation of ST will ensure that a call to ST::send() will cause /// the call to `is_stopped()` below to return true. type ST: EventStopSignaler + Send + Sync; @@ -124,11 +139,11 @@ pub trait EventReceiver { /// Open a server socket to the given socket address. 
fn bind(&mut self, listener: SocketAddr) -> Result; /// Return the next event - fn next_event(&mut self) -> Result; + fn next_event(&mut self) -> Result, EventError>; /// Add a downstream event consumer - fn add_consumer(&mut self, event_out: Sender); + fn add_consumer(&mut self, event_out: Sender>); /// Forward the event to downstream consumers - fn forward_event(&mut self, ev: SignerEvent) -> bool; + fn forward_event(&mut self, ev: SignerEvent) -> bool; /// Determine if the receiver should hang up fn is_stopped(&self) -> bool; /// Get a stop signal instance that, when sent, will cause this receiver to stop accepting new @@ -169,23 +184,23 @@ pub trait EventReceiver { } /// Event receiver for Signer events -pub struct SignerEventReceiver { +pub struct SignerEventReceiver { /// Address we bind to local_addr: Option, /// server socket that listens for HTTP POSTs from the node http_server: Option, /// channel into which to write newly-discovered data - out_channels: Vec>, + out_channels: Vec>>, /// inter-thread stop variable -- if set to true, then the `main_loop` will exit stop_signal: Arc, /// Whether the receiver is running on mainnet is_mainnet: bool, } -impl SignerEventReceiver { +impl SignerEventReceiver { /// Make a new Signer event receiver, and return both the receiver and the read end of a /// channel into which node-received data can be obtained. - pub fn new(is_mainnet: bool) -> SignerEventReceiver { + pub fn new(is_mainnet: bool) -> SignerEventReceiver { SignerEventReceiver { http_server: None, local_addr: None, @@ -198,7 +213,7 @@ impl SignerEventReceiver { /// Do something with the socket pub fn with_server(&mut self, todo: F) -> Result where - F: FnOnce(&SignerEventReceiver, &mut HttpServer, bool) -> R, + F: FnOnce(&SignerEventReceiver, &mut HttpServer, bool) -> R, { let mut server = if let Some(s) = self.http_server.take() { s @@ -230,6 +245,7 @@ impl SignerStopSignaler { } impl EventStopSignaler for SignerStopSignaler { + #[cfg_attr(test, mutants::skip)] fn send(&mut self) { self.stop_signal.store(true, Ordering::SeqCst); // wake up the thread so the atomicbool can be checked @@ -243,15 +259,14 @@ impl EventStopSignaler for SignerStopSignaler { body.len(), body ); - match stream.write_all(req.as_bytes()) { - Err(e) => error!("Failed to send shutdown request: {}", e), - _ => (), - }; + if let Err(e) = stream.write_all(req.as_bytes()) { + error!("Failed to send shutdown request: {}", e); + } } } } -impl EventReceiver for SignerEventReceiver { +impl EventReceiver for SignerEventReceiver { type ST = SignerStopSignaler; /// Start listening on the given socket address. @@ -266,7 +281,7 @@ impl EventReceiver for SignerEventReceiver { /// Wait for the node to post something, and then return it. /// Errors are recoverable -- the caller should call this method again even if it returns an /// error. - fn next_event(&mut self) -> Result { + fn next_event(&mut self) -> Result, EventError> { self.with_server(|event_receiver, http_server, _is_mainnet| { // were we asked to terminate? if event_receiver.is_stopped() { @@ -323,7 +338,7 @@ impl EventReceiver for SignerEventReceiver { /// Forward an event /// Return true on success; false on error. /// Returning false terminates the event receiver. 
- fn forward_event(&mut self, ev: SignerEvent) -> bool { + fn forward_event(&mut self, ev: SignerEvent) -> bool { if self.out_channels.is_empty() { // nothing to do error!("No channels connected to event receiver"); @@ -347,7 +362,7 @@ impl EventReceiver for SignerEventReceiver { } /// Add an event consumer. A received event will be forwarded to this Sender. - fn add_consumer(&mut self, out_channel: Sender) { + fn add_consumer(&mut self, out_channel: Sender>) { self.out_channels.push(out_channel); } @@ -371,12 +386,13 @@ fn ack_dispatcher(request: HttpRequest) { }; } +// TODO: add tests from mutation testing results #4835 +#[cfg_attr(test, mutants::skip)] /// Process a stackerdb event from the node -fn process_stackerdb_event( +fn process_stackerdb_event( local_addr: Option, mut request: HttpRequest, -) -> Result { - debug!("Got stackerdb_chunks event"); +) -> Result, EventError> { let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); @@ -387,6 +403,7 @@ fn process_stackerdb_event( ))); } + debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; @@ -400,7 +417,7 @@ fn process_stackerdb_event( event_contract_id ); ack_dispatcher(request); - return Err(e.into()); + return Err(e); } Ok(x) => x, }; @@ -410,44 +427,28 @@ fn process_stackerdb_event( Ok(signer_event) } -impl TryFrom for SignerEvent { +impl TryFrom for SignerEvent { type Error = EventError; fn try_from(event: StackerDBChunksEvent) -> Result { let signer_event = if event.contract_id.name.as_str() == MINERS_NAME && event.contract_id.is_boot() { - let mut blocks = vec![]; let mut messages = vec![]; let mut miner_pk = None; for chunk in event.modified_slots { + let Ok(msg) = T::consensus_deserialize(&mut chunk.data.as_slice()) else { + continue; + }; + miner_pk = Some(chunk.recover_pk().map_err(|e| { EventError::MalformedRequest(format!( "Failed to recover PK from StackerDB chunk: {e}" )) })?); - if chunk.slot_id % MINER_SLOT_COUNT == 0 { - // block - let Ok(block) = - BlockProposalSigners::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - blocks.push(block); - } else if chunk.slot_id % MINER_SLOT_COUNT == 1 { - // message - let Ok(msg) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - messages.push(msg); - } else { - return Err(EventError::UnrecognizedEvent( - "Unrecognized slot_id for miners contract".into(), - )); - }; + messages.push(msg); } - SignerEvent::MinerMessages(blocks, messages, miner_pk) + SignerEvent::MinerMessages(messages, miner_pk.ok_or(EventError::EmptyChunksEvent)?) 
} else if event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot() { let Some((signer_set, _)) = get_signers_db_signer_set_message_id(event.contract_id.name.as_str()) @@ -455,10 +456,10 @@ impl TryFrom for SignerEvent { return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); }; // signer-XXX-YYY boot contract - let signer_messages: Vec = event + let signer_messages: Vec = event .modified_slots .iter() - .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) + .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) .collect(); SignerEvent::SignerMessages(signer_set, signer_messages) } else { @@ -469,7 +470,9 @@ impl TryFrom for SignerEvent { } /// Process a proposal response from the node -fn process_proposal_response(mut request: HttpRequest) -> Result { +fn process_proposal_response( + mut request: HttpRequest, +) -> Result, EventError> { debug!("Got proposal_response event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { @@ -495,7 +498,9 @@ fn process_proposal_response(mut request: HttpRequest) -> Result Result { +fn process_new_burn_block_event( + mut request: HttpRequest, +) -> Result, EventError> { debug!("Got burn_block event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { @@ -519,7 +524,19 @@ fn process_new_burn_block_event(mut request: HttpRequest) -> Result QualifiedContractIdentifier; + /// All possible Message Slot values + fn all() -> &'static [Self]; +} + +/// A trait for signer messages used in signer communciation +pub trait SignerMessage: StacksMessageCodec { + /// The contract identifier for the message slot in stacker db + fn msg_id(&self) -> Option; +} diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0b7eb2dbcf6..bf786888c16 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -24,11 +24,12 @@ use std::thread; use std::thread::JoinHandle; use std::time::Duration; +use clarity::codec::StacksMessageCodec; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; use crate::error::EventError; -use crate::events::{EventReceiver, EventStopSignaler, SignerEvent}; +use crate::events::{EventReceiver, EventStopSignaler, SignerEvent, SignerEventTrait}; /// Some libcs, like musl, have a very small stack size. /// Make sure it's big enough. @@ -40,7 +41,7 @@ const STDERR: i32 = 2; /// Trait describing the needful components of a top-level runloop. /// This is where the signer business logic would go. /// Implement this, and you get all the multithreaded setup for free. -pub trait SignerRunLoop { +pub trait SignerRunLoop { /// Hint to set how long to wait for new events fn set_event_timeout(&mut self, timeout: Duration); /// Getter for the event poll timeout @@ -50,7 +51,7 @@ pub trait SignerRunLoop { /// Returns None to keep running. fn run_one_pass( &mut self, - event: Option, + event: Option>, cmd: Option, res: Sender, ) -> Option; @@ -64,7 +65,7 @@ pub trait SignerRunLoop { /// This would run in a separate thread from the event receiver. 
fn main_loop( &mut self, - event_recv: Receiver, + event_recv: Receiver>, command_recv: Receiver, result_send: Sender, mut event_stop_signaler: EVST, @@ -93,7 +94,7 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer { /// the runloop itself signer_loop: Option, /// the event receiver to use @@ -102,10 +103,12 @@ pub struct Signer { command_receiver: Option>, /// the result sender to use result_sender: Option>, + /// phantom data for the codec + phantom_data: PhantomData, } /// The running signer implementation -pub struct RunningSigner { +pub struct RunningSigner, R, T: SignerEventTrait> { /// join handle for signer runloop signer_join: JoinHandle>, /// join handle for event receiver @@ -114,7 +117,7 @@ pub struct RunningSigner { stop_signal: EV::ST, } -impl RunningSigner { +impl, R, T: SignerEventTrait> RunningSigner { /// Stop the signer, and get the final state pub fn stop(mut self) -> Option { // kill event receiver @@ -189,19 +192,20 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl Signer { /// Create a new signer with the given runloop and event receiver. pub fn new( runloop: SL, event_receiver: EV, command_receiver: Receiver, result_sender: Sender, - ) -> Signer { + ) -> Signer { Signer { signer_loop: Some(runloop), event_receiver: Some(event_receiver), command_receiver: Some(command_receiver), result_sender: Some(result_sender), + phantom_data: PhantomData, } } } @@ -209,9 +213,10 @@ impl Signer { impl< CMD: Send + 'static, R: Send + 'static, - SL: SignerRunLoop + Send + 'static, - EV: EventReceiver + Send + 'static, - > Signer + T: SignerEventTrait + 'static, + SL: SignerRunLoop + Send + 'static, + EV: EventReceiver + Send + 'static, + > Signer { /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. Advanced signers may not need this method, and instead opt to run the receiver @@ -223,7 +228,7 @@ impl< /// /// On success, this method consumes the Signer and returns a RunningSigner with the relevant /// inter-thread communication primitives for the caller to shut down the system. 
- pub fn spawn(&mut self, bind_addr: SocketAddr) -> Result, EventError> { + pub fn spawn(&mut self, bind_addr: SocketAddr) -> Result, EventError> { let mut event_receiver = self .event_receiver .take() @@ -241,13 +246,14 @@ impl< let (event_send, event_recv) = channel(); event_receiver.add_consumer(event_send); + let bind_port = bind_addr.port(); event_receiver.bind(bind_addr)?; let stop_signaler = event_receiver.get_stop_signaler()?; let mut ret_stop_signaler = event_receiver.get_stop_signaler()?; // start a thread for the event receiver let event_thread = thread::Builder::new() - .name("event_receiver".to_string()) + .name(format!("event_receiver:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || event_receiver.main_loop()) .map_err(|e| { @@ -257,7 +263,7 @@ impl< // start receiving events and doing stuff with them let runloop_thread = thread::Builder::new() - .name("signer_runloop".to_string()) + .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || { signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) diff --git a/libsigner/src/session.rs b/libsigner/src/session.rs index 30966f897bb..c13621392b7 100644 --- a/libsigner/src/session.rs +++ b/libsigner/src/session.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -20,7 +20,8 @@ use std::str; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::{ stackerdb_get_chunk_path, stackerdb_get_metadata_path, stackerdb_post_chunk_path, SlotMetadata, - StackerDBChunkAckData, StackerDBChunkData, + StackerDBChunkAckData, StackerDBChunkData, SIGNERS_STACKERDB_CHUNK_SIZE, + STACKERDB_MAX_CHUNK_SIZE, }; use stacks_common::codec::StacksMessageCodec; @@ -94,6 +95,7 @@ pub trait SignerSession { } /// signer session for a stackerdb instance +#[derive(Debug)] pub struct StackerDBSession { /// host we're talking to pub host: String, @@ -214,10 +216,23 @@ impl SignerSession for StackerDBSession { /// query the replica for zero or more latest chunks fn get_latest_chunks(&mut self, slot_ids: &[u32]) -> Result>>, RPCError> { let mut payloads = vec![]; + let limit = if self.stackerdb_contract_id.name.starts_with("signer") { + SIGNERS_STACKERDB_CHUNK_SIZE + } else { + usize::try_from(STACKERDB_MAX_CHUNK_SIZE) + .expect("infallible: StackerDB chunk size exceeds usize::MAX") + }; for slot_id in slot_ids.iter() { let path = stackerdb_get_chunk_path(self.stackerdb_contract_id.clone(), *slot_id, None); let chunk = match self.rpc_request("GET", &path, None, &[]) { - Ok(body_bytes) => Some(body_bytes), + Ok(body_bytes) => { + // Verify that the chunk is not too large + if body_bytes.len() > limit { + None + } else { + Some(body_bytes) + } + } Err(RPCError::HttpError(code)) => { if code != 404 { return Err(RPCError::HttpError(code)); diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs index 119873fd1e4..fdcb857faf8 100644 --- a/libsigner/src/signer_set.rs +++ b/libsigner/src/signer_set.rs @@ -86,7 +86,7 @@ impl SignerEntries { weight_end = weight_start + entry.weight; let key_ids: HashSet = (weight_start..weight_end).collect(); for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk.clone()); + wsts_key_ids.insert(*key_id, ecdsa_pk); } 
signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); coordinator_key_ids.insert(signer_id, key_ids); diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d2b052fae9b..d0f3887b45d 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -264,7 +264,7 @@ fn test_run_http_request_with_body() { let result_chunked = run_http_request( &mut msock_chunked, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -275,7 +275,7 @@ fn test_run_http_request_with_body() { let result_plain = run_http_request( &mut msock_plain, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -321,7 +321,7 @@ fn test_run_http_request_no_body() { let result_chunked = run_http_request( &mut msock_chunked, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, @@ -330,7 +330,7 @@ fn test_run_http_request_no_body() { .unwrap(); let result_plain = run_http_request( &mut msock_plain, - &"127.0.0.1:20443", + "127.0.0.1:20443", verb, path, content_type, diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index 1d3e1f3cc0d..c584572ba7b 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -16,6 +16,7 @@ mod http; +use std::fmt::Debug; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -36,20 +37,20 @@ use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use wsts::net::{DkgBegin, Packet}; -use crate::events::SignerEvent; -use crate::messages::SignerMessage; +use crate::events::{SignerEvent, SignerEventTrait}; +use crate::v1::messages::SignerMessage; use crate::{Signer, SignerEventReceiver, SignerRunLoop}; /// Simple runloop implementation. It receives `max_events` events and returns `events` from the /// last call to `run_one_pass` as its final state. 
-struct SimpleRunLoop { +struct SimpleRunLoop { poll_timeout: Duration, - events: Vec, + events: Vec>, max_events: usize, } -impl SimpleRunLoop { - pub fn new(max_events: usize) -> SimpleRunLoop { +impl SimpleRunLoop { + pub fn new(max_events: usize) -> SimpleRunLoop { SimpleRunLoop { poll_timeout: Duration::from_millis(100), events: vec![], @@ -62,7 +63,7 @@ enum Command { Empty, } -impl SignerRunLoop, Command> for SimpleRunLoop { +impl SignerRunLoop>, Command, T> for SimpleRunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.poll_timeout = timeout; } @@ -73,10 +74,10 @@ impl SignerRunLoop, Command> for SimpleRunLoop { fn run_one_pass( &mut self, - event: Option, + event: Option>, _cmd: Option, - _res: Sender>, - ) -> Option> { + _res: Sender>>, + ) -> Option>> { debug!("Got event: {:?}", &event); if let Some(event) = event { self.events.push(event); @@ -161,7 +162,7 @@ fn test_simple_signer() { .unwrap() }); - let sent_events: Vec = chunks + let sent_events: Vec> = chunks .iter() .map(|chunk| { let msg = chunk.modified_slots[0].data.clone(); @@ -211,7 +212,7 @@ fn test_status_endpoint() { sleep_ms(3000); let accepted_events = running_signer.stop().unwrap(); - let sent_events: Vec = vec![SignerEvent::StatusCheck]; + let sent_events: Vec> = vec![SignerEvent::StatusCheck]; assert_eq!(sent_events, accepted_events); mock_stacks_node.join().unwrap(); diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs new file mode 100644 index 00000000000..7d411f89b5b --- /dev/null +++ b/libsigner/src/v0/messages.rs @@ -0,0 +1,943 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Messages in the signer-miner interaction have a multi-level hierarchy. +//! Signers send messages to each other through Packet messages. These messages, +//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored +//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a +//! shared identifier space between the four message kinds and their subtypes. +//! +//! These four message kinds are differentiated with a `SignerMessageTypePrefix` +//! and the `SignerMessage` enum. 
+ +use std::fmt::{Debug, Display}; +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::Sender; +use std::sync::Arc; + +use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; +use blockstack_lib::chainstate::stacks::StacksTransaction; +use blockstack_lib::net::api::getinfo::RPCPeerInfoData; +use blockstack_lib::net::api::postblock_proposal::{ + BlockValidateReject, BlockValidateResponse, ValidateRejectCode, +}; +use blockstack_lib::util_lib::boot::boot_code_id; +use blockstack_lib::util_lib::signed_structured_data::{ + make_structured_data_domain, structured_data_message_hash, +}; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksPrivateKey, StacksPublicKey, +}; +use clarity::types::PrivateKey; +use clarity::util::hash::Sha256Sum; +use clarity::util::retry::BoundReader; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::types::serialization::SerializationError; +use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::Value; +use hashbrown::{HashMap, HashSet}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha512_256}; +use stacks_common::codec::{ + read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, + StacksMessageCodec, +}; +use stacks_common::consts::SIGNER_SLOTS_PER_USER; +use stacks_common::util::hash::Sha512Trunc256Sum; +use tiny_http::{ + Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, +}; + +use crate::http::{decode_http_body, decode_http_request}; +use crate::stacks_common::types::PublicKey; +use crate::{ + BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, + SignerMessage as SignerMessageTrait, +}; + +define_u8_enum!( +/// Enum representing the stackerdb message identifier: this is +/// the contract index in the signers contracts (i.e., X in signers-0-X) +MessageSlotID { + /// Block Response message from signers + BlockResponse = 1, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 2 +}); + +define_u8_enum!( +/// Enum representing the slots used by the miner +MinerSlotID { + /// Block proposal from the miner + BlockProposal = 0, + /// Block pushed from the miner + BlockPushed = 1 +}); + +impl MessageSlotIDTrait for MessageSlotID { + fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + fn all() -> &'static [Self] { + MessageSlotID::ALL + } +} + +impl SignerMessageTrait for SignerMessage { + fn msg_id(&self) -> Option { + self.msg_id() + } +} + +define_u8_enum!( +/// Enum representing the SignerMessage type prefix +SignerMessageTypePrefix { + /// Block Proposal message from miners + BlockProposal = 0, + /// Block Response message from signers + BlockResponse = 1, + /// Block Pushed message from miners + BlockPushed = 2, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 3 +}); + +#[cfg_attr(test, mutants::skip)] +impl MessageSlotID { + /// Return the StackerDB contract corresponding to messages of this type + pub fn stacker_db_contract( + &self, + mainnet: bool, + reward_cycle: u64, + ) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + 
+ /// Return the u32 identifier for the message slot (used to index the contract that stores it) + pub fn to_u32(self) -> u32 { + self.to_u8().into() + } +} + +#[cfg_attr(test, mutants::skip)] +impl Display for MessageSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}({})", self, self.to_u8()) + } +} + +impl TryFrom for SignerMessageTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown signer message type prefix: {value}")) + }) + } +} + +impl From<&SignerMessage> for SignerMessageTypePrefix { + #[cfg_attr(test, mutants::skip)] + fn from(message: &SignerMessage) -> Self { + match message { + SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, + SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, + SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, + SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, + } + } +} + +/// The messages being sent through the stacker db contracts +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum SignerMessage { + /// The block proposal from miners for signers to observe and sign + BlockProposal(BlockProposal), + /// The block response from signers for miners to observe + BlockResponse(BlockResponse), + /// A block pushed from miners to the signers set + BlockPushed(NakamotoBlock), + /// A mock signature from the epoch 2.5 signers + MockSignature(MockSignature), +} + +impl SignerMessage { + /// Helper function to determine the slot ID for the provided stacker-db writer id + /// Not every message has a `MessageSlotID`: messages from the miner do not + /// broadcast over `.signers-0-X` contracts. 
+
+impl SignerMessage {
+    /// Helper function to determine the slot ID for the provided stacker-db writer id.
+    /// Not every message has a `MessageSlotID`: messages from the miner do not
+    /// broadcast over `.signers-0-X` contracts.
+    #[cfg_attr(test, mutants::skip)]
+    pub fn msg_id(&self) -> Option<MessageSlotID> {
+        match self {
+            Self::BlockProposal(_) | Self::BlockPushed(_) => None,
+            Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse),
+            Self::MockSignature(_) => Some(MessageSlotID::MockSignature),
+        }
+    }
+}
+
+impl StacksMessageCodec for SignerMessage {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        SignerMessageTypePrefix::from(self)
+            .to_u8()
+            .consensus_serialize(fd)?;
+        match self {
+            SignerMessage::BlockProposal(block_proposal) => block_proposal.consensus_serialize(fd),
+            SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd),
+            SignerMessage::BlockPushed(block) => block.consensus_serialize(fd),
+            SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd),
+        }?;
+        Ok(())
+    }
+
+    #[cfg_attr(test, mutants::skip)]
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let type_prefix_byte = u8::consensus_deserialize(fd)?;
+        let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?;
+        let message = match type_prefix {
+            SignerMessageTypePrefix::BlockProposal => {
+                let block_proposal = StacksMessageCodec::consensus_deserialize(fd)?;
+                SignerMessage::BlockProposal(block_proposal)
+            }
+            SignerMessageTypePrefix::BlockResponse => {
+                let block_response = StacksMessageCodec::consensus_deserialize(fd)?;
+                SignerMessage::BlockResponse(block_response)
+            }
+            SignerMessageTypePrefix::BlockPushed => {
+                let block = StacksMessageCodec::consensus_deserialize(fd)?;
+                SignerMessage::BlockPushed(block)
+            }
+            SignerMessageTypePrefix::MockSignature => {
+                let signature = StacksMessageCodec::consensus_deserialize(fd)?;
+                SignerMessage::MockSignature(signature)
+            }
+        };
+        Ok(message)
+    }
+}
+
+/// Workaround for the fact that a lot of the structs being deserialized are not defined in messages.rs
+pub trait StacksMessageCodecExtensions: Sized {
+    /// Serialize the struct to the provided writer
+    fn inner_consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError>;
+    /// Deserialize the struct from the provided reader
+    fn inner_consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError>;
+}
+
+/// The signer relevant peer information from the stacks node
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct PeerInfo {
+    /// The burn block height
+    pub burn_block_height: u64,
+    /// The consensus hash of the stacks tip
+    pub stacks_tip_consensus_hash: ConsensusHash,
+    /// The stacks tip
+    pub stacks_tip: BlockHeaderHash,
+    /// The stacks tip height
+    pub stacks_tip_height: u64,
+    /// The pox consensus
+    pub pox_consensus: ConsensusHash,
+    /// The server version
+    pub server_version: String,
+}
+
+impl StacksMessageCodec for PeerInfo {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &self.burn_block_height)?;
+        write_next(fd, self.stacks_tip_consensus_hash.as_bytes())?;
+        write_next(fd, &self.stacks_tip)?;
+        write_next(fd, &self.stacks_tip_height)?;
+        write_next(fd, &(self.server_version.as_bytes().len() as u8))?;
+        fd.write_all(self.server_version.as_bytes())
+            .map_err(CodecError::WriteError)?;
+        write_next(fd, &self.pox_consensus)?;
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let burn_block_height = read_next::<u64, _>(fd)?;
+        let stacks_tip_consensus_hash = read_next::<ConsensusHash, _>(fd)?;
+        let stacks_tip = read_next::<BlockHeaderHash, _>(fd)?;
+        let stacks_tip_height = read_next::<u64, _>(fd)?;
+        let len_byte: u8 = read_next(fd)?;
+        let mut bytes = vec![0u8; len_byte as usize];
+        fd.read_exact(&mut bytes).map_err(CodecError::ReadError)?;
+        // must encode a valid string
+        let server_version = String::from_utf8(bytes).map_err(|_e| {
+            CodecError::DeserializeError(
+                "Failed to parse server version name: could not construct from utf8".to_string(),
+            )
+        })?;
+        let pox_consensus = read_next::<ConsensusHash, _>(fd)?;
+        Ok(Self {
+            burn_block_height,
+            stacks_tip_consensus_hash,
+            stacks_tip,
+            stacks_tip_height,
+            server_version,
+            pox_consensus,
+        })
+    }
+}
+
+/// A snapshot of the signer view of the stacks node to be used for mock signing.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct MockSignData {
+    /// The view of the stacks node peer information at the time of the mock signature
+    pub peer_info: PeerInfo,
+    /// The burn block height of the event that triggered the mock signature
+    pub event_burn_block_height: u64,
+    /// The chain id for the mock signature
+    pub chain_id: u32,
+}
+
+impl StacksMessageCodec for MockSignData {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        self.peer_info.consensus_serialize(fd)?;
+        write_next(fd, &self.event_burn_block_height)?;
+        write_next(fd, &self.chain_id)?;
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let peer_info = PeerInfo::consensus_deserialize(fd)?;
+        let event_burn_block_height = read_next::<u64, _>(fd)?;
+        let chain_id = read_next::<u32, _>(fd)?;
+        Ok(Self {
+            peer_info,
+            event_burn_block_height,
+            chain_id,
+        })
+    }
+}
+
+/// A mock signature for the stacks node to be used for mock signing.
+/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct MockSignature {
+    /// The signature across the sign data
+    signature: MessageSignature,
+    /// The data that was signed across
+    pub sign_data: MockSignData,
+}
+
+impl MockSignature {
+    /// Create a new mock signature from the provided event burn block height, peer info, chain id, and private key.
+    /// Note that the peer burn block height and event burn block height may not be the same if the peer view is stale.
+    pub fn new(
+        event_burn_block_height: u64,
+        peer_info: PeerInfo,
+        chain_id: u32,
+        stacks_private_key: &StacksPrivateKey,
+    ) -> Self {
+        let mut sig = Self {
+            signature: MessageSignature::empty(),
+            sign_data: MockSignData {
+                peer_info,
+                event_burn_block_height,
+                chain_id,
+            },
+        };
+        sig.sign(stacks_private_key)
+            .expect("Failed to sign MockSignature");
+        sig
+    }
+
+    /// The signature hash for the mock signature
+    pub fn signature_hash(&self) -> Sha256Sum {
+        let domain_tuple =
+            make_structured_data_domain("mock-signer", "1.0.0", self.sign_data.chain_id);
+        let data_tuple = Value::Tuple(
+            TupleData::from_data(vec![
+                (
+                    "stacks-tip-consensus-hash".into(),
+                    Value::buff_from(
+                        self.sign_data
+                            .peer_info
+                            .stacks_tip_consensus_hash
+                            .as_bytes()
+                            .into(),
+                    )
+                    .unwrap(),
+                ),
+                (
+                    "stacks-tip".into(),
+                    Value::buff_from(self.sign_data.peer_info.stacks_tip.as_bytes().into())
+                        .unwrap(),
+                ),
+                (
+                    "stacks-tip-height".into(),
+                    Value::UInt(self.sign_data.peer_info.stacks_tip_height.into()),
+                ),
+                (
+                    "server-version".into(),
+                    Value::string_ascii_from_bytes(
+                        self.sign_data.peer_info.server_version.clone().into(),
+                    )
+                    .unwrap(),
+                ),
+                (
+                    "event-burn-block-height".into(),
+                    Value::UInt(self.sign_data.event_burn_block_height.into()),
+                ),
+                (
+                    "pox-consensus".into(),
+                    Value::buff_from(self.sign_data.peer_info.pox_consensus.as_bytes().into())
+                        .unwrap(),
+                ),
+            ])
+            .expect("Error creating signature hash"),
+        );
+        structured_data_message_hash(data_tuple, domain_tuple)
+    }
+
+    /// Sign the mock signature and set the internal signature field
+    fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> {
+        let signature_hash = self.signature_hash();
+        self.signature = private_key.sign(signature_hash.as_bytes())?;
+        Ok(())
+    }
+
+    /// Verify the mock signature against the provided public key
+    pub fn verify(&self, public_key: &StacksPublicKey) -> Result<bool, String> {
+        if self.signature == MessageSignature::empty() {
+            return Ok(false);
+        }
+        let signature_hash = self.signature_hash();
+        public_key
+            .verify(&signature_hash.0, &self.signature)
+            .map_err(|e| e.to_string())
+    }
+}
+
+impl StacksMessageCodec for MockSignature {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &self.signature)?;
+        self.sign_data.consensus_serialize(fd)?;
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let signature = read_next::<MessageSignature, _>(fd)?;
+        let sign_data = read_next::<MockSignData, _>(fd)?;
+        Ok(Self {
+            signature,
+            sign_data,
+        })
+    }
+}
+
+define_u8_enum!(
+/// Enum representing the reject code type prefix
+RejectCodeTypePrefix {
+    /// The block was rejected due to validation issues
+    ValidationFailed = 0,
+    /// The block was rejected due to connectivity issues with the signer
+    ConnectivityIssues = 1,
+    /// The block was rejected in a prior round
+    RejectedInPriorRound = 2,
+    /// The block was rejected due to no sortition view
+    NoSortitionView = 3,
+    /// The block was rejected due to a mismatch with expected sortition view
+    SortitionViewMismatch = 4
+});
+
+impl TryFrom<u8> for RejectCodeTypePrefix {
+    type Error = CodecError;
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        Self::from_u8(value).ok_or_else(|| {
+            CodecError::DeserializeError(format!("Unknown reject code type prefix: {value}"))
+        })
+    }
+}
+
+impl From<&RejectCode> for RejectCodeTypePrefix {
+    fn from(reject_code: &RejectCode) -> Self {
+        match reject_code {
+            RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed,
+            RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues,
+            RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound,
+            RejectCode::NoSortitionView => RejectCodeTypePrefix::NoSortitionView,
+            RejectCode::SortitionViewMismatch => RejectCodeTypePrefix::SortitionViewMismatch,
+        }
+    }
+}
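`MockSignature` signs a SIP-018-style structured data hash (domain `("mock-signer", "1.0.0", chain_id)`) over the signer's current view of the node. A hypothetical end-to-end sketch, assuming `CHAIN_ID_TESTNET` is in scope and a `PeerInfo` is at hand:

    // Hypothetical usage sketch (not part of the patch):
    fn mock_signature_example(peer_info: PeerInfo) {
        let private_key = StacksPrivateKey::new();
        let public_key = StacksPublicKey::from_private(&private_key);
        // The event burn block height (42) is made up for illustration.
        let sig = MockSignature::new(42, peer_info, CHAIN_ID_TESTNET, &private_key);
        assert!(sig.verify(&public_key).expect("verify should not error"));
    }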
+
+/// This enum is used to supply a `reason_code` for block rejections
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub enum RejectCode {
+    /// RPC endpoint Validation failed
+    ValidationFailed(ValidateRejectCode),
+    /// No Sortition View to verify against
+    NoSortitionView,
+    /// The block was rejected due to connectivity issues with the signer
+    ConnectivityIssues,
+    /// The block was rejected in a prior round
+    RejectedInPriorRound,
+    /// The block was rejected due to a mismatch with expected sortition view
+    SortitionViewMismatch,
+}
+
+define_u8_enum!(
+/// Enum representing the BlockResponse type prefix
+BlockResponseTypePrefix {
+    /// An accepted block response
+    Accepted = 0,
+    /// A rejected block response
+    Rejected = 1
+});
+
+impl TryFrom<u8> for BlockResponseTypePrefix {
+    type Error = CodecError;
+    fn try_from(value: u8) -> Result<Self, Self::Error> {
+        Self::from_u8(value).ok_or_else(|| {
+            CodecError::DeserializeError(format!("Unknown block response type prefix: {value}"))
+        })
+    }
+}
+
+impl From<&BlockResponse> for BlockResponseTypePrefix {
+    fn from(block_response: &BlockResponse) -> Self {
+        match block_response {
+            BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted,
+            BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected,
+        }
+    }
+}
+
+/// The response that a signer sends back to observing miners
+/// either accepting or rejecting a Nakamoto block with the corresponding reason
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub enum BlockResponse {
+    /// The Nakamoto block was accepted and therefore signed
+    Accepted((Sha512Trunc256Sum, MessageSignature)),
+    /// The Nakamoto block was rejected and therefore not signed
+    Rejected(BlockRejection),
+}
+
+#[cfg_attr(test, mutants::skip)]
+impl std::fmt::Display for BlockResponse {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            BlockResponse::Accepted(a) => {
+                write!(
+                    f,
+                    "BlockAccepted: signer_sighash = {}, signature = {}",
+                    a.0, a.1
+                )
+            }
+            BlockResponse::Rejected(r) => {
+                write!(
+                    f,
+                    "BlockRejected: signer_sighash = {}, code = {}, reason = {}",
+                    r.signer_signature_hash, r.reason_code, r.reason
+                )
+            }
+        }
+    }
+}
+
+impl BlockResponse {
+    /// Create a new accepted BlockResponse for the provided block signer signature hash and signature
+    pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self {
+        Self::Accepted((hash, sig))
+    }
+
+    /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code
+    pub fn rejected(hash: Sha512Trunc256Sum, reject_code: RejectCode) -> Self {
+        Self::Rejected(BlockRejection::new(hash, reject_code))
+    }
+}
+
+impl StacksMessageCodec for BlockResponse {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?;
+        match self {
+            BlockResponse::Accepted((hash, sig)) => {
+                write_next(fd, hash)?;
+                write_next(fd, sig)?;
+            }
+            BlockResponse::Rejected(rejection) => {
+                write_next(fd, rejection)?;
+            }
+        };
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let type_prefix_byte = read_next::<u8, _>(fd)?;
+        let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?;
+        let response = match type_prefix {
+            BlockResponseTypePrefix::Accepted => {
+                let hash = read_next::<Sha512Trunc256Sum, _>(fd)?;
+                let sig = read_next::<MessageSignature, _>(fd)?;
+                BlockResponse::Accepted((hash, sig))
+            }
+            BlockResponseTypePrefix::Rejected => {
+                let rejection = read_next::<BlockRejection, _>(fd)?;
+                BlockResponse::Rejected(rejection)
+            }
+        };
+        Ok(response)
+    }
+}
+
+/// A rejection response from a signer for a proposed block
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct BlockRejection {
+    /// The reason for the rejection
+    pub reason: String,
+    /// The reason code for the rejection
+    pub reason_code: RejectCode,
+    /// The signer signature hash of the block that was rejected
+    pub signer_signature_hash: Sha512Trunc256Sum,
+}
+
+impl BlockRejection {
+    /// Create a new BlockRejection for the provided block and reason code
+    pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self {
+        Self {
+            reason: reason_code.to_string(),
+            reason_code,
+            signer_signature_hash,
+        }
+    }
+}
+
+impl StacksMessageCodec for BlockRejection {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &self.reason.as_bytes().to_vec())?;
+        write_next(fd, &self.reason_code)?;
+        write_next(fd, &self.signer_signature_hash)?;
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let reason_bytes = read_next::<Vec<u8>, _>(fd)?;
+        let reason = String::from_utf8(reason_bytes).map_err(|e| {
+            CodecError::DeserializeError(format!("Failed to decode reason string: {:?}", &e))
+        })?;
+        let reason_code = read_next::<RejectCode, _>(fd)?;
+        let signer_signature_hash = read_next::<Sha512Trunc256Sum, _>(fd)?;
+        Ok(Self {
+            reason,
+            reason_code,
+            signer_signature_hash,
+        })
+    }
+}
+
+impl From<BlockValidateReject> for BlockRejection {
+    fn from(reject: BlockValidateReject) -> Self {
+        Self {
+            reason: reject.reason,
+            reason_code: RejectCode::ValidationFailed(reject.reason_code),
+            signer_signature_hash: reject.signer_signature_hash,
+        }
+    }
+}
+
+impl StacksMessageCodec for RejectCode {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?;
+        // Match explicitly on every variant (no catch-all arm) so that any
+        // variant added in the future must also be handled here.
+        match self {
+            RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?,
+            RejectCode::ConnectivityIssues
+            | RejectCode::RejectedInPriorRound
+            | RejectCode::NoSortitionView
+            | RejectCode::SortitionViewMismatch => {
+                // No additional data to serialize / deserialize
+            }
+        };
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let type_prefix_byte = read_next::<u8, _>(fd)?;
+        let type_prefix = RejectCodeTypePrefix::try_from(type_prefix_byte)?;
+        let code = match type_prefix {
+            RejectCodeTypePrefix::ValidationFailed => RejectCode::ValidationFailed(
+                ValidateRejectCode::try_from(read_next::<u8, _>(fd)?).map_err(|e| {
+                    CodecError::DeserializeError(format!(
+                        "Failed to decode validation reject code: {:?}",
+                        &e
+                    ))
+                })?,
+            ),
+            RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues,
+            RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound,
+            RejectCodeTypePrefix::NoSortitionView => RejectCode::NoSortitionView,
+            RejectCodeTypePrefix::SortitionViewMismatch => RejectCode::SortitionViewMismatch,
+        };
+        Ok(code)
+    }
+}
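Note that `BlockRejection::new` derives the human-readable `reason` from the reject code's `Display` implementation (defined just below), so the two fields stay consistent. A brief sketch:

    // Illustrative only:
    fn rejection_example() {
        let response = BlockResponse::rejected(
            Sha512Trunc256Sum([0u8; 32]),
            RejectCode::ConnectivityIssues,
        );
        if let BlockResponse::Rejected(rejection) = &response {
            // The reason string mirrors the Display text of the reject code.
            assert_eq!(rejection.reason, RejectCode::ConnectivityIssues.to_string());
        }
    }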
+
+#[cfg_attr(test, mutants::skip)]
+impl std::fmt::Display for RejectCode {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match self {
+            RejectCode::ValidationFailed(code) => write!(f, "Validation failed: {:?}", code),
+            RejectCode::ConnectivityIssues => write!(
+                f,
+                "The block was rejected due to connectivity issues with the signer."
+            ),
+            RejectCode::RejectedInPriorRound => write!(
+                f,
+                "The block was proposed before and rejected by the signer."
+            ),
+            RejectCode::NoSortitionView => {
+                write!(f, "The block was rejected due to no sortition view.")
+            }
+            RejectCode::SortitionViewMismatch => {
+                write!(
+                    f,
+                    "The block was rejected due to a mismatch with expected sortition view."
+                )
+            }
+        }
+    }
+}
+
+impl From<BlockResponse> for SignerMessage {
+    fn from(block_response: BlockResponse) -> Self {
+        Self::BlockResponse(block_response)
+    }
+}
+
+impl From<BlockValidateReject> for BlockResponse {
+    fn from(rejection: BlockValidateReject) -> Self {
+        Self::Rejected(rejection.into())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader;
+    use blockstack_lib::chainstate::stacks::{
+        ThresholdSignature, TransactionAnchorMode, TransactionAuth, TransactionPayload,
+        TransactionPostConditionMode, TransactionSmartContract, TransactionVersion,
+    };
+    use blockstack_lib::util_lib::strings::StacksString;
+    use clarity::consts::CHAIN_ID_MAINNET;
+    use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash};
+    use clarity::types::PrivateKey;
+    use clarity::util::hash::MerkleTree;
+    use clarity::util::secp256k1::MessageSignature;
+    use rand::{thread_rng, Rng, RngCore};
+    use rand_core::OsRng;
+    use stacks_common::bitvec::BitVec;
+    use stacks_common::consts::CHAIN_ID_TESTNET;
+    use stacks_common::types::chainstate::StacksPrivateKey;
+
+    use super::{StacksMessageCodecExtensions, *};
+
+    #[test]
+    fn signer_slots_count_is_sane() {
+        let slot_identifiers_len = MessageSlotID::ALL.len();
+        assert!(
+            SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len,
+            "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})",
+            SIGNER_SLOTS_PER_USER,
+            slot_identifiers_len,
+        );
+    }
+
+    #[test]
+    fn serde_reject_code() {
+        let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock);
+        let serialized_code = code.serialize_to_vec();
+        let deserialized_code = read_next::<RejectCode, _>(&mut &serialized_code[..])
+            .expect("Failed to deserialize RejectCode");
+        assert_eq!(code, deserialized_code);
+
+        let code = RejectCode::ConnectivityIssues;
+        let serialized_code = code.serialize_to_vec();
+        let deserialized_code = read_next::<RejectCode, _>(&mut &serialized_code[..])
+            .expect("Failed to deserialize RejectCode");
+        assert_eq!(code, deserialized_code);
+    }
+
+    #[test]
+    fn serde_block_rejection() {
+        let rejection = BlockRejection::new(
+            Sha512Trunc256Sum([0u8; 32]),
+            RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock),
+        );
+        let serialized_rejection = rejection.serialize_to_vec();
+        let deserialized_rejection = read_next::<BlockRejection, _>(&mut &serialized_rejection[..])
+            .expect("Failed to deserialize BlockRejection");
+        assert_eq!(rejection, deserialized_rejection);
+
+        let rejection =
+            BlockRejection::new(Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues);
+        let serialized_rejection = rejection.serialize_to_vec();
+        let deserialized_rejection = read_next::<BlockRejection, _>(&mut &serialized_rejection[..])
+            .expect("Failed to deserialize BlockRejection");
+        assert_eq!(rejection, deserialized_rejection);
+    }
+
+    #[test]
+    fn serde_block_response() {
+        let response =
+            BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), MessageSignature::empty()));
+        let serialized_response = response.serialize_to_vec();
+        let deserialized_response = read_next::<BlockResponse, _>(&mut &serialized_response[..])
+            .expect("Failed to deserialize BlockResponse");
+        assert_eq!(response, deserialized_response);
+
+        let response = BlockResponse::Rejected(BlockRejection::new(
+            Sha512Trunc256Sum([1u8; 32]),
+            RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock),
+        ));
+        let serialized_response = response.serialize_to_vec();
+        let deserialized_response = read_next::<BlockResponse, _>(&mut &serialized_response[..])
+            .expect("Failed to deserialize BlockResponse");
+        assert_eq!(response, deserialized_response);
+    }
+
+    #[test]
+    fn serde_signer_message() {
+        let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted((
+            Sha512Trunc256Sum([2u8; 32]),
+            MessageSignature::empty(),
+        )));
+        let serialized_signer_message = signer_message.serialize_to_vec();
+        let deserialized_signer_message =
+            read_next::<SignerMessage, _>(&mut &serialized_signer_message[..])
+                .expect("Failed to deserialize SignerMessage");
+        assert_eq!(signer_message, deserialized_signer_message);
+
+        let header = NakamotoBlockHeader::empty();
+        let mut block = NakamotoBlock {
+            header,
+            txs: vec![],
+        };
+        let tx_merkle_root = {
+            let txid_vecs = block
+                .txs
+                .iter()
+                .map(|tx| tx.txid().as_bytes().to_vec())
+                .collect();
+
+            MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root()
+        };
+        block.header.tx_merkle_root = tx_merkle_root;
+
+        let block_proposal = BlockProposal {
+            block,
+            burn_height: thread_rng().next_u64(),
+            reward_cycle: thread_rng().next_u64(),
+        };
+        let signer_message = SignerMessage::BlockProposal(block_proposal);
+        let serialized_signer_message = signer_message.serialize_to_vec();
+        let deserialized_signer_message =
+            read_next::<SignerMessage, _>(&mut &serialized_signer_message[..])
+                .expect("Failed to deserialize SignerMessage");
+        assert_eq!(signer_message, deserialized_signer_message);
+    }
+
+    fn random_peer_data() -> PeerInfo {
+        let burn_block_height = thread_rng().next_u64();
+        let stacks_tip_consensus_byte: u8 = thread_rng().gen();
+        let stacks_tip_byte: u8 = thread_rng().gen();
+        let stacks_tip_height = thread_rng().next_u64();
+        let server_version = "0.0.0".to_string();
+        let pox_consensus_byte: u8 = thread_rng().gen();
+        PeerInfo {
+            burn_block_height,
+            stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]),
+            stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]),
+            stacks_tip_height,
+            server_version,
+            pox_consensus: ConsensusHash([pox_consensus_byte; 20]),
+        }
+    }
+
+    fn random_mock_sign_data() -> MockSignData {
+        let chain_byte: u8 = thread_rng().gen_range(0..=1);
+        let chain_id = if chain_byte == 1 {
+            CHAIN_ID_TESTNET
+        } else {
+            CHAIN_ID_MAINNET
+        };
+        let peer_info = random_peer_data();
+        MockSignData {
+            peer_info,
+            event_burn_block_height: thread_rng().next_u64(),
+            chain_id,
+        }
+    }
+
+    #[test]
+    fn verify_sign_mock_signature() {
+        let private_key = StacksPrivateKey::new();
+        let public_key = StacksPublicKey::from_private(&private_key);
+
+        let bad_private_key = StacksPrivateKey::new();
+        let bad_public_key = StacksPublicKey::from_private(&bad_private_key);
+
+        let mut mock_signature = MockSignature {
+            signature: MessageSignature::empty(),
+            sign_data: random_mock_sign_data(),
+        };
+        assert!(!mock_signature
+            .verify(&public_key)
+            .expect("Failed to verify MockSignature"));
+
+        mock_signature
+            .sign(&private_key)
+            .expect("Failed to sign MockSignature");
+
+        assert!(mock_signature
+            .verify(&public_key)
+            .expect("Failed to verify MockSignature"));
+        assert!(!mock_signature
+            .verify(&bad_public_key)
+            .expect("Failed to verify MockSignature"));
+    }
+
+    #[test]
+    fn serde_peer_data() {
+        let peer_data = random_peer_data();
+        let serialized_data =
peer_data.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize PeerInfo"); + assert_eq!(peer_data, deserialized_data); + } + + #[test] + fn serde_mock_signature() { + let mock_signature = MockSignature { + signature: MessageSignature::empty(), + sign_data: random_mock_sign_data(), + }; + let serialized_signature = mock_signature.serialize_to_vec(); + let deserialized_signature = read_next::(&mut &serialized_signature[..]) + .expect("Failed to deserialize MockSignature"); + assert_eq!(mock_signature, deserialized_signature); + } + + #[test] + fn serde_sign_data() { + let sign_data = random_mock_sign_data(); + let serialized_data = sign_data.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize MockSignData"); + assert_eq!(sign_data, deserialized_data); + } +} diff --git a/libsigner/src/v0/mod.rs b/libsigner/src/v0/mod.rs new file mode 100644 index 00000000000..703acb85f6a --- /dev/null +++ b/libsigner/src/v0/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Messages for the v0 signer +pub mod messages; diff --git a/libsigner/src/messages.rs b/libsigner/src/v1/messages.rs similarity index 89% rename from libsigner/src/messages.rs rename to libsigner/src/v1/messages.rs index 1b6e7f179fc..b412d9a66ff 100644 --- a/libsigner/src/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -38,6 +38,7 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::util::retry::BoundReader; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use hashbrown::{HashMap, HashSet}; @@ -63,7 +64,7 @@ use wsts::schnorr::ID; use wsts::state_machine::{signer, SignError}; use crate::http::{decode_http_body, decode_http_request}; -use crate::EventError; +use crate::{EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait}; define_u8_enum!( /// Enum representing the stackerdb message identifier: this is @@ -94,16 +95,42 @@ MessageSlotID { /// Transactions list for miners and signers to observe Transactions = 11, /// DKG Results - DkgResults = 12 + DkgResults = 12, + /// Persisted encrypted signer state containing DKG shares + EncryptedSignerState = 13 }); -define_u8_enum!(SignerMessageTypePrefix { +impl MessageSlotIDTrait for MessageSlotID { + fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + fn all() -> &'static [Self] { + MessageSlotID::ALL + } +} + +impl SignerMessageTrait for SignerMessage { + fn msg_id(&self) -> Option { + Some(self.msg_id()) + } +} + +define_u8_enum!( +/// Enum 
representing the signer message type prefix +SignerMessageTypePrefix { + /// A block response message BlockResponse = 0, + /// A wsts packet message Packet = 1, + /// A list of transactions that a signer cares about Transactions = 2, - DkgResults = 3 + /// The results of a successful DKG + DkgResults = 3, + /// The encrypted state of the signer to be persisted + EncryptedSignerState = 4 }); +#[cfg_attr(test, mutants::skip)] impl MessageSlotID { /// Return the StackerDB contract corresponding to messages of this type pub fn stacker_db_contract( @@ -136,26 +163,40 @@ impl TryFrom for SignerMessageTypePrefix { } impl From<&SignerMessage> for SignerMessageTypePrefix { + #[cfg_attr(test, mutants::skip)] fn from(message: &SignerMessage) -> Self { match message { SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, SignerMessage::DkgResults { .. } => SignerMessageTypePrefix::DkgResults, + SignerMessage::EncryptedSignerState(_) => SignerMessageTypePrefix::EncryptedSignerState, } } } -define_u8_enum!(MessageTypePrefix { +define_u8_enum!( +/// Enum representing the message type prefix +MessageTypePrefix { + /// DkgBegin message DkgBegin = 0, + /// DkgPrivateBegin message DkgPrivateBegin = 1, + /// DkgEndBegin message DkgEndBegin = 2, + /// DkgEnd message DkgEnd = 3, + /// DkgPublicShares message DkgPublicShares = 4, + /// DkgPrivateShares message DkgPrivateShares = 5, + /// NonceRequest message NonceRequest = 6, + /// NonceResponse message NonceResponse = 7, + /// SignatureShareRequest message SignatureShareRequest = 8, + /// SignatureShareResponse message SignatureShareResponse = 9 }); @@ -185,13 +226,22 @@ impl TryFrom for MessageTypePrefix { } } -define_u8_enum!(RejectCodeTypePrefix{ +define_u8_enum!( +/// Enum representing the reject code type prefix +RejectCodeTypePrefix { + /// Validation failed ValidationFailed = 0, + /// Signed rejection SignedRejection = 1, + /// Insufficient signers InsufficientSigners = 2, + /// Missing transactions MissingTransactions = 3, + /// Connectivity issues ConnectivityIssues = 4, + /// Nonce timeout NonceTimeout = 5, + /// Aggregator error AggregatorError = 6 }); @@ -234,9 +284,12 @@ pub enum SignerMessage { /// The polynomial commits used to construct the aggregate key party_polynomials: Vec<(u32, PolyCommitment)>, }, + /// The encrypted state of the signer to be persisted + EncryptedSignerState(Vec), } impl Debug for SignerMessage { + #[cfg_attr(test, mutants::skip)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::BlockResponse(b) => Debug::fmt(b, f), @@ -255,12 +308,16 @@ impl Debug for SignerMessage { .field("party_polynomials", &party_polynomials) .finish() } + Self::EncryptedSignerState(s) => { + f.debug_tuple("EncryptedSignerState").field(s).finish() + } } } } impl SignerMessage { /// Helper function to determine the slot ID for the provided stacker-db writer id + #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> MessageSlotID { match self { Self::Packet(packet) => match packet.msg { @@ -278,6 +335,7 @@ impl SignerMessage { Self::BlockResponse(_) => MessageSlotID::BlockResponse, Self::Transactions(_) => MessageSlotID::Transactions, Self::DkgResults { .. 
} => MessageSlotID::DkgResults, + Self::EncryptedSignerState(_) => MessageSlotID::EncryptedSignerState, } } } @@ -345,10 +403,14 @@ impl StacksMessageCodec for SignerMessage { party_polynomials.iter().map(|(a, b)| (a, b)), )?; } + SignerMessage::EncryptedSignerState(encrypted_state) => { + write_next(fd, encrypted_state)?; + } }; Ok(()) } + #[cfg_attr(test, mutants::skip)] fn consensus_deserialize(fd: &mut R) -> Result { let type_prefix_byte = read_next::(fd)?; let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; @@ -383,6 +445,15 @@ impl StacksMessageCodec for SignerMessage { party_polynomials, } } + SignerMessageTypePrefix::EncryptedSignerState => { + // Typically the size of the signer state is much smaller, but in the fully degenerate case the size of the persisted state is + // 2800 * 32 * 4 + C for some small constant C. + // To have some margin, we're expanding the left term with an additional factor 4 + let max_encrypted_state_size = 2800 * 32 * 4 * 4; + let mut bound_reader = BoundReader::from_reader(fd, max_encrypted_state_size); + let encrypted_state = read_next::<_, _>(&mut bound_reader)?; + SignerMessage::EncryptedSignerState(encrypted_state) + } }; Ok(message) } @@ -390,7 +461,9 @@ impl StacksMessageCodec for SignerMessage { /// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs pub trait StacksMessageCodecExtensions: Sized { + /// Serialize the struct to the provided writer fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; + /// Deserialize the struct from the provided reader fn inner_consensus_deserialize(fd: &mut R) -> Result; } @@ -510,50 +583,87 @@ impl StacksMessageCodecExtensions for HashSet { } } +define_u8_enum!( +/// Enum representing the DKG failure type prefix +DkgFailureTypePrefix { + /// Bad state + BadState = 0, + /// Missing public shares + MissingPublicShares = 1, + /// Bad public shares + BadPublicShares = 2, + /// Missing private shares + MissingPrivateShares = 3, + /// Bad private shares + BadPrivateShares = 4 +}); + +impl TryFrom for DkgFailureTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown DKG failure type prefix: {value}")) + }) + } +} + +impl From<&DkgFailure> for DkgFailureTypePrefix { + fn from(failure: &DkgFailure) -> Self { + match failure { + DkgFailure::BadState => DkgFailureTypePrefix::BadState, + DkgFailure::MissingPublicShares(_) => DkgFailureTypePrefix::MissingPublicShares, + DkgFailure::BadPublicShares(_) => DkgFailureTypePrefix::BadPublicShares, + DkgFailure::MissingPrivateShares(_) => DkgFailureTypePrefix::MissingPrivateShares, + DkgFailure::BadPrivateShares(_) => DkgFailureTypePrefix::BadPrivateShares, + } + } +} + impl StacksMessageCodecExtensions for DkgFailure { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(DkgFailureTypePrefix::from(self) as u8))?; match self { - DkgFailure::BadState => write_next(fd, &0u8), + DkgFailure::BadState => { + // No additional data to serialize + } DkgFailure::MissingPublicShares(shares) => { - write_next(fd, &1u8)?; - shares.inner_consensus_serialize(fd) + shares.inner_consensus_serialize(fd)?; } DkgFailure::BadPublicShares(shares) => { - write_next(fd, &2u8)?; - shares.inner_consensus_serialize(fd) + shares.inner_consensus_serialize(fd)?; } DkgFailure::MissingPrivateShares(shares) => { - write_next(fd, &3u8)?; - shares.inner_consensus_serialize(fd) + 
shares.inner_consensus_serialize(fd)?; } DkgFailure::BadPrivateShares(shares) => { - write_next(fd, &4u8)?; write_next(fd, &(shares.len() as u32))?; for (id, share) in shares { write_next(fd, id)?; share.inner_consensus_serialize(fd)?; } - Ok(()) } } + Ok(()) } + fn inner_consensus_deserialize(fd: &mut R) -> Result { - let failure_type_prefix = read_next::(fd)?; + let failure_type_prefix_byte = read_next::(fd)?; + let failure_type_prefix = DkgFailureTypePrefix::try_from(failure_type_prefix_byte)?; let failure_type = match failure_type_prefix { - 0 => DkgFailure::BadState, - 1 => { + DkgFailureTypePrefix::BadState => DkgFailure::BadState, + DkgFailureTypePrefix::MissingPublicShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::MissingPublicShares(set) } - 2 => { + DkgFailureTypePrefix::BadPublicShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::BadPublicShares(set) } - 3 => { + DkgFailureTypePrefix::MissingPrivateShares => { let set = HashSet::::inner_consensus_deserialize(fd)?; DkgFailure::MissingPrivateShares(set) } - 4 => { + DkgFailureTypePrefix::BadPrivateShares => { let mut map = HashMap::new(); let len = read_next::(fd)?; for _ in 0..len { @@ -563,12 +673,6 @@ impl StacksMessageCodecExtensions for DkgFailure { } DkgFailure::BadPrivateShares(map) } - _ => { - return Err(CodecError::DeserializeError(format!( - "Unknown DkgFailure type prefix: {}", - failure_type_prefix - ))) - } }; Ok(failure_type) } @@ -620,34 +724,60 @@ impl StacksMessageCodecExtensions for DkgEndBegin { } } +define_u8_enum!( +/// Enum representing the DKG status type prefix +DkgStatusTypePrefix { + /// Success + Success = 0, + /// Failure + Failure = 1 +}); + +impl TryFrom for DkgStatusTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown DKG status type prefix: {value}")) + }) + } +} + +impl From<&DkgStatus> for DkgStatusTypePrefix { + fn from(status: &DkgStatus) -> Self { + match status { + DkgStatus::Success => DkgStatusTypePrefix::Success, + DkgStatus::Failure(_) => DkgStatusTypePrefix::Failure, + } + } +} + impl StacksMessageCodecExtensions for DkgEnd { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.dkg_id)?; write_next(fd, &self.signer_id)?; + write_next(fd, &(DkgStatusTypePrefix::from(&self.status) as u8))?; match &self.status { - DkgStatus::Success => write_next(fd, &0u8), + DkgStatus::Success => { + // No additional data to serialize + } DkgStatus::Failure(failure) => { - write_next(fd, &1u8)?; - failure.inner_consensus_serialize(fd) + failure.inner_consensus_serialize(fd)?; } } + Ok(()) } + fn inner_consensus_deserialize(fd: &mut R) -> Result { let dkg_id = read_next::(fd)?; let signer_id = read_next::(fd)?; - let status_type_prefix = read_next::(fd)?; + let status_type_prefix_byte = read_next::(fd)?; + let status_type_prefix = DkgStatusTypePrefix::try_from(status_type_prefix_byte)?; let status = match status_type_prefix { - 0 => DkgStatus::Success, - 1 => { + DkgStatusTypePrefix::Success => DkgStatus::Success, + DkgStatusTypePrefix::Failure => { let failure = DkgFailure::inner_consensus_deserialize(fd)?; DkgStatus::Failure(failure) } - _ => { - return Err(CodecError::DeserializeError(format!( - "Unknown DKG status type prefix: {}", - status_type_prefix - ))) - } }; Ok(DkgEnd { dkg_id, @@ -1008,6 +1138,33 @@ impl StacksMessageCodecExtensions for Packet { } } +define_u8_enum!( +/// Enum 
representing the block response type prefix +BlockResponseTypePrefix { + /// Accepted + Accepted = 0, + /// Rejected + Rejected = 1 +}); + +impl TryFrom for BlockResponseTypePrefix { + type Error = CodecError; + fn try_from(value: u8) -> Result { + Self::from_u8(value).ok_or_else(|| { + CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) + }) + } +} + +impl From<&BlockResponse> for BlockResponseTypePrefix { + fn from(block_response: &BlockResponse) -> Self { + match block_response { + BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, + BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, + } + } +} + /// The response that a signer sends back to observing miners /// either accepting or rejecting a Nakamoto block with the corresponding reason #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] @@ -1056,14 +1213,13 @@ impl BlockResponse { impl StacksMessageCodec for BlockResponse { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; match self { BlockResponse::Accepted((hash, sig)) => { - write_next(fd, &0u8)?; write_next(fd, hash)?; write_next(fd, sig)?; } BlockResponse::Rejected(rejection) => { - write_next(fd, &1u8)?; write_next(fd, rejection)?; } }; @@ -1071,27 +1227,23 @@ impl StacksMessageCodec for BlockResponse { } fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix = read_next::(fd)?; + let type_prefix_byte = read_next::(fd)?; + let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; let response = match type_prefix { - 0 => { + BlockResponseTypePrefix::Accepted => { let hash = read_next::(fd)?; let sig = read_next::(fd)?; BlockResponse::Accepted((hash, sig)) } - 1 => { + BlockResponseTypePrefix::Rejected => { let rejection = read_next::(fd)?; BlockResponse::Rejected(rejection) } - _ => { - return Err(CodecError::DeserializeError(format!( - "Unknown block response type prefix: {}", - type_prefix - ))) - } }; Ok(response) } } + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -1312,6 +1464,8 @@ mod test { use super::{StacksMessageCodecExtensions, *}; #[test] + #[should_panic] + // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! fn signer_slots_count_is_sane() { let slot_identifiers_len = MessageSlotID::ALL.len(); assert!( diff --git a/libsigner/src/v1/mod.rs b/libsigner/src/v1/mod.rs new file mode 100644 index 00000000000..e5a691efb2b --- /dev/null +++ b/libsigner/src/v1/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
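The `EncryptedSignerState` read bound introduced above deserves a quick sanity check. A sketch of the arithmetic (constant names here are illustrative; the 2 MB figure is the `SIGNERS_STACKERDB_CHUNK_SIZE` added to libstackerdb below):

    // Degenerate-case persisted state: 2800 * 32 * 4 bytes plus a small constant,
    // with an extra 4x margin applied to the dominant term.
    const DEGENERATE_STATE_SIZE: u64 = 2800 * 32 * 4; // 358,400 bytes
    const MAX_ENCRYPTED_STATE_SIZE: u64 = DEGENERATE_STATE_SIZE * 4; // 1,433,600 bytes (~1.4 MB)
    // A maximal state therefore still fits in a single 2 MB signer StackerDB chunk.
    const SIGNERS_STACKERDB_CHUNK_SIZE: u64 = 2 * 1024 * 1024; // 2,097,152 bytes
    const _: () = assert!(MAX_ENCRYPTED_STATE_SIZE < SIGNERS_STACKERDB_CHUNK_SIZE);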
+ +/// Messages for the v1 signer +pub mod messages; diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 8c38d8be7b5..507d2249f71 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -35,6 +35,8 @@ use stacks_common::util::secp256k1::MessageSignature; /// maximum chunk size (16 MB; same as MAX_PAYLOAD_SIZE) pub const STACKERDB_MAX_CHUNK_SIZE: u32 = 16 * 1024 * 1024; +/// CHUNK_SIZE constant for signers StackerDBs (2MB) +pub const SIGNERS_STACKERDB_CHUNK_SIZE: usize = 2 * 1024 * 1024; // 2MB #[cfg(test)] mod tests; diff --git a/pox-locking/Cargo.toml b/pox-locking/Cargo.toml index fd2729048d3..4fbc9885dcc 100644 --- a/pox-locking/Cargo.toml +++ b/pox-locking/Cargo.toml @@ -23,5 +23,8 @@ clarity = { package = "clarity", path = "../clarity" } stacks_common = { package = "stacks-common", path = "../stacks-common" } slog = { version = "2.5.2", features = [ "max_level_trace" ] } +[dev-dependencies] +mutants = "0.0.3" + [features] slog_json = ["stacks_common/slog_json", "clarity/slog_json"] diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index b1547eb2e7e..0a1dc9d3c47 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -105,6 +105,8 @@ fn create_event_info_aggregation_code(function_name: &str) -> String { ) } +// TODO: add tests from mutation testing results #4836 +#[cfg_attr(test, mutants::skip)] /// Craft the code snippet to generate the method-specific `data` payload fn create_event_info_data_code( function_name: &str, @@ -467,6 +469,14 @@ fn create_event_info_data_code( end-cycle-id: (some (+ {reward_cycle} u1)), ;; Get start cycle ID start-cycle-id: start-cycle, + ;; equal to args[3] + signer-sig: {signer_sig}, + ;; equal to args[4] + signer-key: {signer_key}, + ;; equal to args[5] + max-amount: {max_amount}, + ;; equal to args[6] + auth-id: {auth_id}, }} }}) "#, @@ -474,6 +484,10 @@ fn create_event_info_data_code( reward_cycle = &args[1], reward_cycle_index = &args.get(2).unwrap_or(&Value::none()), pox_set_offset = pox_set_offset.replace("%height%", "burn-block-height"), + signer_sig = &args.get(3).unwrap_or(&Value::none()), + signer_key = &args.get(4).unwrap_or(&Value::none()), + max_amount = &args.get(5).unwrap_or(&Value::none()), + auth_id = &args.get(6).unwrap_or(&Value::none()), ) } "delegate-stx" => { diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index d9f987f5749..d5bfeb44e9e 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -33,6 +33,7 @@ chrono = "0.4.19" libc = "0.2.82" wsts = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true, optional = true } [target.'cfg(unix)'.dependencies] nix = "0.23" @@ -51,10 +52,6 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.24.3" features = ["serde", "recovery"] -[dependencies.rusqlite] -version = "=0.24.2" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.ed25519-dalek] workspace = true @@ -73,10 +70,14 @@ assert-json-diff = "1.0.0" rand_core = { workspace = true } [features] -default = ["developer-mode"] +default = ["canonical", "developer-mode"] +canonical = ["rusqlite"] developer-mode = [] slog_json = ["slog-json"] -testing = [] +testing = ["canonical"] +serde = [] +bech32_std = [] +bech32_strict = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git 
a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs
index 01503460688..792532e135f 100644
--- a/stacks-common/src/bitvec.rs
+++ b/stacks-common/src/bitvec.rs
@@ -1,5 +1,21 @@
-use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef};
-use rusqlite::ToSql;
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#[cfg(feature = "canonical")]
+use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
 use serde::{Deserialize, Serialize};
 
 use crate::codec::{
@@ -90,6 +106,7 @@
     }
 }
 
+#[cfg(feature = "canonical")]
 impl<const MAX_SIZE: u16> FromSql for BitVec<MAX_SIZE> {
     fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
         let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?;
@@ -98,6 +115,7 @@
     }
 }
 
+#[cfg(feature = "canonical")]
 impl<const MAX_SIZE: u16> ToSql for BitVec<MAX_SIZE> {
     fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
         let hex = bytes_to_hex(self.serialize_to_vec().as_slice());
@@ -105,6 +123,33 @@
     }
 }
 
+/// An iterator over the bits of a `BitVec`
+pub struct BitVecIter<'a, const MAX_SIZE: u16> {
+    index: u16,
+    byte: Option<&'a u8>,
+    bitvec: &'a BitVec<MAX_SIZE>,
+}
+
+impl<'a, const MAX_SIZE: u16> Iterator for BitVecIter<'a, MAX_SIZE> {
+    type Item = bool;
+
+    fn next(&mut self) -> Option<bool> {
+        if self.index >= self.bitvec.len {
+            return None;
+        }
+        let byte = self.byte?;
+        let next = (*byte & BitVec::<MAX_SIZE>::bit_index(self.index)) != 0;
+        self.index = self.index.saturating_add(1);
+        if self.index < self.bitvec.len {
+            // check if byte needs to be incremented
+            if self.index % 8 == 0 {
+                let vec_index = usize::from(self.index / 8);
+                self.byte = self.bitvec.data.get(vec_index);
+            }
+        }
+        Some(next)
+    }
+}
+
 impl<const MAX_SIZE: u16> BitVec<MAX_SIZE> {
     /// Construct a new BitVec with all entries set to `false` and total length `len`
     pub fn zeros(len: u16) -> Result<BitVec<MAX_SIZE>, String> {
@@ -117,10 +162,29 @@ impl<const MAX_SIZE: u16> BitVec<MAX_SIZE> {
         Ok(BitVec { data, len })
     }
 
+    /// Construct a new BitVec with all entries set to `true` and total length `len`
+    pub fn ones(len: u16) -> Result<BitVec<MAX_SIZE>, String> {
+        let mut bitvec: BitVec<MAX_SIZE> = BitVec::zeros(len)?;
+        for i in 0..len {
+            bitvec.set(i, true)?;
+        }
+        Ok(bitvec)
+    }
+
+    pub fn iter(&self) -> BitVecIter<MAX_SIZE> {
+        let byte = self.data.get(0);
+        BitVecIter {
+            index: 0,
+            bitvec: self,
+            byte,
+        }
+    }
+
     pub fn len(&self) -> u16 {
         self.len
     }
 
+    /// Return the number of bytes needed to store `len` bits.
     fn data_len(len: u16) -> u16 {
         len / 8 + if len % 8 == 0 { 0 } else { 1 }
     }
@@ -169,12 +233,30 @@ impl<const MAX_SIZE: u16> BitVec<MAX_SIZE> {
             self.data[i] = 0;
         }
     }
+
+    /// Serialize a BitVec to a string of 1s and 0s for display
+    /// purposes. For example, a BitVec with [true, false, true]
+    /// will be serialized to "101".
+    pub fn binary_str(&self) -> String {
+        self.clone()
+            .data
+            .into_iter()
+            .fold(String::new(), |acc, byte| {
+                acc + &format!("{:08b}", byte).chars().rev().collect::<String>()
+            })
+            .chars()
+            .take(self.len() as usize)
+            .collect::<String>()
+    }
 }
 
 #[cfg(test)]
 mod test {
+    use serde_json;
+
     use super::BitVec;
     use crate::codec::StacksMessageCodec;
+    use crate::util::hash::to_hex;
 
     fn check_set_get(mut input: BitVec<{ u16::MAX }>) {
         let original_input = input.clone();
@@ -204,6 +286,15 @@
         assert!(input.set(input.len(), false).is_err());
     }
 
+    fn check_iter(input: &BitVec<{ u16::MAX }>) {
+        let mut checked = 0;
+        for (ix, entry) in input.iter().enumerate() {
+            checked += 1;
+            assert_eq!(input.get(u16::try_from(ix).unwrap()).unwrap(), entry);
+        }
+        assert_eq!(checked, input.len());
+    }
+
     fn check_serialization(input: &BitVec<{ u16::MAX }>) {
         let byte_ser = input.serialize_to_vec();
         let deserialized = BitVec::consensus_deserialize(&mut byte_ser.as_slice()).unwrap();
@@ -240,6 +331,7 @@
         }
 
         check_serialization(&bitvec);
+        check_iter(&bitvec);
         check_set_get(bitvec);
     }
 
@@ -258,6 +350,31 @@
         );
     }
 
+    #[test]
+    fn binary_str_serialization() {
+        let mut bitvec_zero_10 = BitVec::<10>::zeros(10).unwrap();
+        bitvec_zero_10.set(0, true).unwrap();
+        bitvec_zero_10.set(5, true).unwrap();
+        bitvec_zero_10.set(3, true).unwrap();
+        assert_eq!(
+            bitvec_zero_10.binary_str(),
+            "1001010000",
+            "Binary string should be 1001010000"
+        );
+    }
+
+    #[test]
+    fn bitvec_ones() {
+        let bitvec_ones_10 = BitVec::<10>::ones(10).unwrap();
+        for i in 0..10 {
+            assert!(
+                bitvec_ones_10.get(i).unwrap(),
+                "All values of ones vec should be true"
+            );
+        }
+        info!("bitvec_ones_10: {:?}", bitvec_ones_10.binary_str());
+    }
+
     #[test]
     fn vectors() {
         let mut inputs = vec![
diff --git a/stacks-common/src/deps_common/bech32/mod.rs b/stacks-common/src/deps_common/bech32/mod.rs
index 99f95e9cd6b..655f2b1a822 100644
--- a/stacks-common/src/deps_common/bech32/mod.rs
+++ b/stacks-common/src/deps_common/bech32/mod.rs
@@ -30,7 +30,7 @@
 //! has more details.
 //!
#![cfg_attr( - feature = "std", + feature = "bech32_std", doc = " # Examples ``` @@ -54,20 +54,20 @@ assert_eq!(variant, Variant::Bech32); #![deny(non_camel_case_types)] #![deny(non_snake_case)] #![deny(unused_mut)] -#![cfg_attr(feature = "strict", deny(warnings))] +#![cfg_attr(feature = "bech32_strict", deny(warnings))] -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] extern crate alloc; -#[cfg(any(test, feature = "std"))] +#[cfg(any(test, feature = "bech32_std"))] extern crate core; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::borrow::Cow; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::{string::String, vec::Vec}; use core::{fmt, mem}; -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] use std::borrow::Cow; /// Integer in the range `0..32` @@ -690,7 +690,7 @@ impl fmt::Display for Error { } } -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] impl std::error::Error for Error { fn description(&self) -> &str { match *self { diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index 8b08ab998a0..be5a6144c70 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -201,7 +201,7 @@ fn build_scriptint(n: i64) -> Vec { let neg = n < 0; - let mut abs = n.abs() as usize; + let mut abs = n.unsigned_abs() as usize; let mut v = Vec::with_capacity(size_of::() + 1); while abs > 0xFF { v.push((abs & 0xFF) as u8); diff --git a/stacks-common/src/deps_common/bitcoin/mod.rs b/stacks-common/src/deps_common/bitcoin/mod.rs index 890825ea987..b70da5deb21 100644 --- a/stacks-common/src/deps_common/bitcoin/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/mod.rs @@ -26,8 +26,8 @@ //! // Clippy flags -#![cfg_attr(feature = "clippy", allow(needless_range_loop))] // suggests making a big mess of array newtypes -#![cfg_attr(feature = "clippy", allow(extend_from_slice))] // `extend_from_slice` only available since 1.6 +#![allow(clippy::needless_range_loop)] // suggests making a big mess of array newtypes +#![allow(clippy::extend_from_slice)] // `extend_from_slice` only available since 1.6 // Coding conventions #![deny(non_upper_case_globals)] diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index 3e9186bd92b..daa1de33601 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -29,6 +29,7 @@ use crate::deps_common::bitcoin::network::encodable::{ConsensusDecodable, Consen use crate::deps_common::bitcoin::network::serialize::{ self, BitcoinHash, RawEncoder, SimpleEncoder, }; +use crate::util::hash::bytes_to_hex; use crate::util::uint::Uint256; use crate::util::HexError; @@ -49,6 +50,24 @@ impl_array_newtype!(Ripemd160Hash, u8, 20); /// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x)) pub struct Hash160([u8; 20]); impl_array_newtype!(Hash160, u8, 20); +impl_byte_array_rusqlite_only!(Hash160); + +impl Hash160 { + /// Convert the Hash160 inner bytes to a non-prefixed hex string + pub fn to_hex(&self) -> String { + bytes_to_hex(&self.0) + } + + /// Try to instantiate a Hash160 using the exact inner bytes of the hash. 
+ pub fn from_bytes(bytes: &[u8]) -> Option { + let mut return_bytes = [0; 20]; + if bytes.len() != return_bytes.len() { + return None; + } + return_bytes.copy_from_slice(bytes); + Some(Self(return_bytes)) + } +} impl Default for Sha256dEncoder { fn default() -> Self { diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 5d572585b8e..67ca2c52cdd 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -22,7 +22,6 @@ #![cfg_attr(test, deny(warnings))] // we can't upgrade while supporting Rust 1.3 #![allow(deprecated)] -#![cfg_attr(httparse_min_2018, allow(rust_2018_idioms))] //! # httparse //! @@ -1280,7 +1279,6 @@ mod tests { ); } - #[cfg(feature = "std")] #[test] fn test_std_error() { use std::error::Error as StdError; diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 0a9fa9d641c..5059f6f049d 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -37,6 +37,7 @@ use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; + pub use crate::types::MINING_COMMITMENT_WINDOW; pub const TOKEN_TRANSFER_MEMO_LENGTH: usize = 34; // same as it is in Stacks v1 @@ -63,4 +64,48 @@ pub mod consts { /// The number of StackerDB slots each signing key needs /// to use to participate in DKG and block validation signing. pub const SIGNER_SLOTS_PER_USER: u32 = 13; + + /// peer version (big-endian) + /// first byte == major network protocol version (currently 0x18) + /// second and third bytes are unused + /// fourth byte == highest epoch supported by this node + pub const PEER_VERSION_MAINNET_MAJOR: u32 = 0x18000000; + pub const PEER_VERSION_TESTNET_MAJOR: u32 = 0xfacade00; + + pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; + pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; + pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; + pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; + pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; + pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; + pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; + pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; + pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; + + /// this should be updated to the latest network epoch version supported by + /// this node. this will be checked by the `validate_epochs()` method. + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32; + + /// set the fourth byte of the peer version + pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; + pub const PEER_VERSION_TESTNET: u32 = PEER_VERSION_TESTNET_MAJOR | PEER_NETWORK_EPOCH; + + /// network identifiers + pub const NETWORK_ID_MAINNET: u32 = 0x17000000; + pub const NETWORK_ID_TESTNET: u32 = 0xff000000; +} + +/// This test asserts that the constant above doesn't change. +/// This exists because the constant above is used by Epoch 2.5 instantiation code. +/// +/// Adding more slots will require instantiating more .signers contracts through either +/// consensus changes (i.e., a new epoch) or through non-consensus-critical contract +/// deployments. 
+#[test] +fn signer_slots_count_2_5() { + assert_eq!( + consts::SIGNER_SLOTS_PER_USER, + 13, + "The .signers-x-y contracts in Epoch 2.5 were instantiated with 13 slots" + ); } diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index d41e21225db..47d6c3c499b 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -4,7 +4,6 @@ use std::str::FromStr; use curve25519_dalek::digest::Digest; use rand::{Rng, SeedableRng}; -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::de::{Deserialize, Error as de_Error}; use serde::ser::Error as ser_Error; use serde::Serialize; @@ -64,7 +63,6 @@ pub struct SortitionId(pub [u8; 32]); impl_array_newtype!(SortitionId, u8, 32); impl_array_hexstring_fmt!(SortitionId); impl_byte_array_newtype!(SortitionId, u8, 32); -impl_byte_array_rusqlite_only!(SortitionId); pub struct VRFSeed(pub [u8; 32]); impl_array_newtype!(VRFSeed, u8, 32); @@ -247,9 +245,18 @@ pub struct StacksBlockId(pub [u8; 32]); impl_array_newtype!(StacksBlockId, u8, 32); impl_array_hexstring_fmt!(StacksBlockId); impl_byte_array_newtype!(StacksBlockId, u8, 32); -impl_byte_array_rusqlite_only!(StacksBlockId); impl_byte_array_serde!(StacksBlockId); +/// A newtype for `StacksBlockId` that indicates a block is a tenure-change +/// block. This helps to explicitly differentiate tenure-change blocks in the +/// code. +pub struct TenureBlockId(pub StacksBlockId); +impl From for TenureBlockId { + fn from(id: StacksBlockId) -> TenureBlockId { + TenureBlockId(id) + } +} + pub struct ConsensusHash(pub [u8; 20]); impl_array_newtype!(ConsensusHash, u8, 20); impl_array_hexstring_fmt!(ConsensusHash); @@ -323,18 +330,6 @@ impl StacksMessageCodec for StacksWorkScore { } } -// Implement rusqlite traits for a bunch of structs that used to be defined -// in the chainstate code -impl_byte_array_rusqlite_only!(ConsensusHash); -impl_byte_array_rusqlite_only!(Hash160); -impl_byte_array_rusqlite_only!(BlockHeaderHash); -impl_byte_array_rusqlite_only!(VRFSeed); -impl_byte_array_rusqlite_only!(BurnchainHeaderHash); -impl_byte_array_rusqlite_only!(VRFProof); -impl_byte_array_rusqlite_only!(TrieHash); -impl_byte_array_rusqlite_only!(Sha512Trunc256Sum); -impl_byte_array_rusqlite_only!(MessageSignature); - impl_byte_array_message_codec!(TrieHash, TRIEHASH_ENCODED_SIZE as u32); impl_byte_array_message_codec!(Sha512Trunc256Sum, 32); @@ -400,21 +395,6 @@ impl BurnchainHeaderHash { } } -impl FromSql for Sha256dHash { - fn column_result(value: ValueRef) -> FromSqlResult { - let hex_str = value.as_str()?; - let hash = Sha256dHash::from_hex(hex_str).map_err(|_e| FromSqlError::InvalidType)?; - Ok(hash) - } -} - -impl ToSql for Sha256dHash { - fn to_sql(&self) -> rusqlite::Result { - let hex_str = self.be_hex_string(); - Ok(hex_str.into()) - } -} - impl VRFSeed { /// First-ever VRF seed from the genesis block. 
It's all 0's pub fn initial() -> VRFSeed { diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 2652347273b..c9953594597 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,6 +1,9 @@ use std::cmp::Ordering; use std::fmt; +#[cfg(feature = "canonical")] +pub mod sqlite; + use crate::address::c32::{c32_address, c32_address_decode}; use crate::address::{ public_keys_to_address_hash, to_bits_p2pkh, AddressHashMode, @@ -55,10 +58,15 @@ pub trait Address: Clone + fmt::Debug + fmt::Display { fn is_burn(&self) -> bool; } -pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; -pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; +// sliding burnchain window over which a miner's past block-commit payouts will be used to weight +// its current block-commit in a sortition. +// This is the value used in epoch 2.x +pub const MINING_COMMITMENT_WINDOW: u8 = 6; + +// how often a miner must commit in its mining commitment window in order to even be considered for +// sortition. +// Only relevant for Nakamoto (epoch 3.x) +pub const MINING_COMMITMENT_FREQUENCY_NAKAMOTO: u8 = 3; #[repr(u32)] #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Copy, Serialize, Deserialize)] @@ -74,11 +82,31 @@ pub enum StacksEpochId { Epoch30 = 0x03000, } +pub enum MempoolCollectionBehavior { + ByStacksHeight, + ByReceiveTime, +} + impl StacksEpochId { pub fn latest() -> StacksEpochId { StacksEpochId::Epoch30 } + /// In this epoch, how should the mempool perform garbage collection? + pub fn mempool_garbage_behavior(&self) -> MempoolCollectionBehavior { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => MempoolCollectionBehavior::ByStacksHeight, + StacksEpochId::Epoch30 => MempoolCollectionBehavior::ByReceiveTime, + } + } + /// Returns whether or not this Epoch should perform /// memory checks during analysis pub fn analysis_memory(&self) -> bool { @@ -108,6 +136,38 @@ impl StacksEpochId { } } + /// Whether or not this epoch supports the punishment of PoX reward + /// recipients using the bitvec scheme + pub fn allows_pox_punishment(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => true, + } + } + + /// Whether or not this epoch interprets block commit OPs block hash field + /// as a new block hash or the StacksBlockId of a new tenure's parent tenure. + pub fn block_commits_to_parent(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => true, + } + } + /// Does this epoch support unlocking PoX contributors that miss a slot? /// /// Epoch 2.0 - 2.05 didn't support this feature, but they weren't epoch-guarded on it. Instead, @@ -117,6 +177,63 @@ impl StacksEpochId { pub fn supports_pox_missed_slot_unlocks(&self) -> bool { self < &StacksEpochId::Epoch25 } + + /// What is the sortition mining commitment window for this epoch? 
+ pub fn mining_commitment_window(&self) -> u8 { + MINING_COMMITMENT_WINDOW + } + + /// How often must a miner mine in order to be considered for sortition in its commitment + /// window? + pub fn mining_commitment_frequency(&self) -> u8 { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => 0, + StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, + } + } + + /// Returns true for epochs which use Nakamoto blocks. These blocks use a + /// different header format than the previous Stacks blocks, which among + /// other changes includes a Stacks-specific timestamp. + pub fn uses_nakamoto_blocks(&self) -> bool { + self >= &StacksEpochId::Epoch30 + } + + /// Returns whether or not this epoch uses the tip for reading burn block + /// info in Clarity (3.0+ behavior) or should use the parent block's burn + /// block (behavior before 3.0). + pub fn clarity_uses_tip_burn_block(&self) -> bool { + self >= &StacksEpochId::Epoch30 + } + + /// Does this epoch use the nakamoto reward set, or the epoch2 reward set? + /// We use the epoch2 reward set in all pre-3.0 epochs. + /// We also use the epoch2 reward set in the first 3.0 reward cycle. + /// After that, we use the nakamoto reward set. + pub fn uses_nakamoto_reward_set( + &self, + cur_reward_cycle: u64, + first_epoch30_reward_cycle: u64, + ) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, + } + } } impl std::fmt::Display for StacksEpochId { diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs new file mode 100644 index 00000000000..183ec61fbc6 --- /dev/null +++ b/stacks-common/src/types/sqlite.rs @@ -0,0 +1,57 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
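The reward-set cutover encoded in `uses_nakamoto_reward_set` above is subtle: the first epoch-3.0 reward cycle still uses the epoch2 reward set. A brief hedged sketch of the rule (the `reward_set_kind` helper is illustrative, not part of the patch):

```rust
use stacks_common::types::StacksEpochId;

// Illustrative helper: pick a reward-set implementation using the predicate above.
fn reward_set_kind(epoch: StacksEpochId, cur_cycle: u64, first_epoch30_cycle: u64) -> &'static str {
    if epoch.uses_nakamoto_reward_set(cur_cycle, first_epoch30_cycle) {
        "nakamoto"
    } else {
        // All pre-3.0 epochs, and the first 3.0 cycle itself, use the epoch2 set.
        "epoch2"
    }
}

fn main() {
    assert_eq!(reward_set_kind(StacksEpochId::Epoch30, 100, 100), "epoch2");
    assert_eq!(reward_set_kind(StacksEpochId::Epoch30, 101, 100), "nakamoto");
}
```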
+ +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; + +use super::chainstate::VRFSeed; +use crate::deps_common::bitcoin::util::hash::Sha256dHash; +use crate::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash, +}; +use crate::util::hash::{Hash160, Sha512Trunc256Sum}; +use crate::util::secp256k1::MessageSignature; +use crate::util::vrf::VRFProof; + +pub const NO_PARAMS: &[&dyn ToSql] = &[]; + +impl FromSql for Sha256dHash { + fn column_result(value: ValueRef) -> FromSqlResult { + let hex_str = value.as_str()?; + let hash = Sha256dHash::from_hex(hex_str).map_err(|_e| FromSqlError::InvalidType)?; + Ok(hash) + } +} + +impl ToSql for Sha256dHash { + fn to_sql(&self) -> rusqlite::Result { + let hex_str = self.be_hex_string(); + Ok(hex_str.into()) + } +} + +// Implement rusqlite traits for a bunch of structs that used to be defined +// in the chainstate code +impl_byte_array_rusqlite_only!(ConsensusHash); +impl_byte_array_rusqlite_only!(Hash160); +impl_byte_array_rusqlite_only!(BlockHeaderHash); +impl_byte_array_rusqlite_only!(VRFSeed); +impl_byte_array_rusqlite_only!(BurnchainHeaderHash); +impl_byte_array_rusqlite_only!(VRFProof); +impl_byte_array_rusqlite_only!(TrieHash); +impl_byte_array_rusqlite_only!(Sha512Trunc256Sum); +impl_byte_array_rusqlite_only!(MessageSignature); +impl_byte_array_rusqlite_only!(SortitionId); +impl_byte_array_rusqlite_only!(StacksBlockId); diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 57ce30ad9c8..4e15c5485bd 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -88,30 +88,74 @@ macro_rules! define_named_enum { /// and EnumType.get_name() for free. #[macro_export] macro_rules! define_versioned_named_enum { - ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $Version:expr),)* }) => - { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr)),* $(,)* }) => { + $crate::define_versioned_named_enum_internal!($Name($VerType) { + $($Variant($VarName, $MinVersion, None)),* + }); + }; +} +#[macro_export] +macro_rules! define_versioned_named_enum_with_max { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr, $MaxVersion:expr)),* $(,)* }) => { + $crate::define_versioned_named_enum_internal!($Name($VerType) { + $($Variant($VarName, $MinVersion, $MaxVersion)),* + }); + }; +} + +// An internal macro that does the actual enum definition +#[macro_export] +macro_rules! 
define_versioned_named_enum_internal { + ($Name:ident($VerType:ty) { $($Variant:ident($VarName:literal, $MinVersion:expr, $MaxVersion:expr)),* $(,)* }) => { #[derive(::serde::Serialize, ::serde::Deserialize, Debug, Hash, PartialEq, Eq, Copy, Clone)] pub enum $Name { $($Variant),*, } + impl $Name { pub const ALL: &'static [$Name] = &[$($Name::$Variant),*]; pub const ALL_NAMES: &'static [&'static str] = &[$($VarName),*]; pub fn lookup_by_name(name: &str) -> Option { match name { - $( - $VarName => Some($Name::$Variant), - )* - _ => None + $($VarName => Some($Name::$Variant),)* + _ => None, + } + } + + pub fn lookup_by_name_at_version(name: &str, version: &ClarityVersion) -> Option { + Self::lookup_by_name(name).and_then(|variant| { + let is_active = match ( + variant.get_min_version(), + variant.get_max_version(), + ) { + (ref min_version, Some(ref max_version)) => { + min_version <= version && version <= max_version + } + // No max version is set, so the function is active for all versions greater than min + (ref min_version, None) => min_version <= version, + }; + if is_active { + Some(variant) + } else { + None + } + }) + } + + /// Returns the first Clarity version in which `self` is defined. + pub fn get_min_version(&self) -> $VerType { + match self { + $(Self::$Variant => $MinVersion,)* } } - pub fn get_version(&self) -> $VerType { + /// Returns `Some` for the last Clarity version in which `self` is + /// defined, or `None` if `self` is defined for all versions after + /// `get_min_version()`. + pub fn get_max_version(&self) -> Option<$VerType> { match self { - $( - $Name::$Variant => $Version, - )* + $(Self::$Variant => $MaxVersion,)* } } @@ -125,18 +169,17 @@ macro_rules! define_versioned_named_enum { pub fn get_name_str(&self) -> &'static str { match self { - $( - $Name::$Variant => $VarName, - )* + $(Self::$Variant => $VarName,)* } } } + impl ::std::fmt::Display for $Name { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(f, "{}", self.get_name_str()) } } - } + }; } #[allow(clippy::crate_in_macro_def)] @@ -531,6 +574,11 @@ macro_rules! impl_byte_array_newtype { to_hex(&self.0) } } + impl std::fmt::LowerHex for $thing { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_hex()) + } + } impl std::fmt::Display for $thing { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self.to_hex()) @@ -637,6 +685,7 @@ macro_rules! fmax { }} } +#[cfg(feature = "canonical")] macro_rules! impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md new file mode 100644 index 00000000000..6b28b15e8f6 --- /dev/null +++ b/stacks-signer/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
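As an illustration of the versioned-enum macros defined in `util/macros.rs` above, here is a hypothetical invocation (the variant names and version bounds are examples only; `ClarityVersion` is assumed to be the `clarity` crate's ordered version enum):

```rust
// Hypothetical usage of define_versioned_named_enum_with_max!.
define_versioned_named_enum_with_max!(ExampleFn(ClarityVersion) {
    // Active from Clarity1 through Clarity2 only.
    GetBlockInfo("get-block-info?", ClarityVersion::Clarity1, Some(ClarityVersion::Clarity2)),
    // Active from Clarity3 onward; no max version.
    GetStacksBlockInfo("get-stacks-block-info?", ClarityVersion::Clarity3, None),
});

fn main() {
    // lookup_by_name_at_version() hides variants outside their version window.
    assert_eq!(
        ExampleFn::lookup_by_name_at_version("get-block-info?", &ClarityVersion::Clarity3),
        None
    );
    assert_eq!(
        ExampleFn::lookup_by_name_at_version("get-stacks-block-info?", &ClarityVersion::Clarity3),
        Some(ExampleFn::GetStacksBlockInfo)
    );
}
```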
+
+## [Unreleased]
+
+## [2.5.0.0.5.1]
+
+### Added
+
+- Adds signerdb schema versioning (#4965)
+- Added voting CLI commands `generate-vote` and `verify-vote` (#4934)
+- Add sortition tracking cache (#4905)
+- Push blocks to signer set and adds `/v3/blocks/upload` (#4902)
+
+### Changed
+
+- Fix an issue of poorly timed tenure and bitcoin blocks (#4956)
+- Process pending blocks before ending tenure (#4952)
+- Update rusqlite/sqlite versions (#4948)
+- Return last block sortition in `/v3/sortitions` (#4939)
diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml
index 57b2e808049..1d1af6da783 100644
--- a/stacks-signer/Cargo.toml
+++ b/stacks-signer/Cargo.toml
@@ -24,8 +24,10 @@ backoff = "0.4"
 clarity = { path = "../clarity" }
 clap = { version = "4.1.1", features = ["derive", "env"] }
 hashbrown = { workspace = true }
+lazy_static = "1.4.0"
 libsigner = { path = "../libsigner" }
 libstackerdb = { path = "../libstackerdb" }
+prometheus = { version = "0.9", optional = true }
 rand_core = "0.6"
 reqwest = { version = "0.11.22", default-features = false, features = ["blocking", "json", "rustls-tls"] }
 serde = "1"
@@ -37,22 +39,20 @@ slog-term = "2.6.0"
 stacks-common = { path = "../stacks-common" }
 stackslib = { path = "../stackslib" }
 thiserror = "1.0"
+tiny_http = { version = "0.12", optional = true }
 toml = "0.5.6"
 tracing = "0.1.37"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
 wsts = { workspace = true }
 rand = { workspace = true }
 url = "2.1.0"
+rusqlite = { workspace = true }
 
 [dev-dependencies]
 clarity = { path = "../clarity", features = ["testing"] }
 polynomial = "0.2.6"
 num-traits = "0.2.18"
 
-[dependencies.rusqlite]
-version = "=0.24.2"
-features = ["blob", "serde_json", "i128_blob", "bundled", "trace"]
-
 [dependencies.serde_json]
 version = "1.0"
 features = ["arbitrary_precision", "unbounded_depth"]
@@ -60,3 +60,6 @@ features = ["arbitrary_precision", "unbounded_depth"]
 [dependencies.secp256k1]
 version = "0.24.3"
 features = ["serde", "recovery"]
+
+[features]
+monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"]
\ No newline at end of file
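The new optional `monitoring_prom` feature above pulls in `prometheus` and `tiny_http` only when enabled. A hedged sketch (not from the patch) of the usual cfg-gating pattern such a feature implies:

```rust
// Hedged sketch: how an optional-dependency feature like `monitoring_prom`
// is typically consumed, compiling the metrics endpoint in or out.
#[cfg(feature = "monitoring_prom")]
fn start_metrics_endpoint() {
    // With the feature enabled, the `prometheus` and `tiny_http` optional
    // dependencies are available to serve a metrics endpoint here.
}

#[cfg(not(feature = "monitoring_prom"))]
fn start_metrics_endpoint() {
    // Compiled out: monitoring support is a no-op without the feature.
}

fn main() {
    start_metrics_endpoint();
}
```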
diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md
new file mode 100644
index 00000000000..599d8c7af40
--- /dev/null
+++ b/stacks-signer/release-process.md
@@ -0,0 +1,85 @@
+# Release Process
+
+## Platform support
+
+| Platform                    | Supported                            |
+| --------------------------- | ------------------------------------ |
+| Linux 64-bit                | :white_check_mark:                   |
+| macOS 64-bit                | :white_check_mark:                   |
+| Windows 64-bit              | :white_check_mark:                   |
+| macOS Apple Silicon (ARM64) | _builds are provided but not tested_ |
+| Linux ARMv7                 | _builds are provided but not tested_ |
+| Linux ARM64                 | _builds are provided but not tested_ |
+
+## Release Schedule and Hotfixes
+
+Normal releases in this repository that add new or updated features are made in an ad-hoc manner. The currently staged changes for such releases
+are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository.
+
+For fixes that impact the correct functioning or liveness of the signer, _hotfixes_ may be issued. These hotfixes are categorized by priority
+according to the following rubric:
+
+- **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally.
+- **Medium Priority**. Any fix for an issue that could deny service to individual nodes.
+- **Low Priority**. Any fix for an issue that is not high or medium priority.
+
+## Versioning
+
+This project uses a 6-part version number. When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release (a 5-part version). When there are changes in-between stacks-core releases, the signer binary will assume a 6-part version.
+
+```
+X.Y.Z.A.n.x
+
+X = 2 and does not change in practice unless there’s another Stacks 2.0 type event
+Y increments on consensus-breaking changes
+Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR)
+A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR)
+n increments on patches and hot-fixes (akin to semantic PATCH)
+x increments on the current stacks-core release version
+```
+
+For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` will also be versioned as 2.6.0.0.0. If a change is needed in the signer, it may be released apart from stacks-core as version 2.6.0.0.0.1 and will increment until the next stacks-core release.
+
+## Release Process
+
+1. The release must be timed so that it does not interfere with a _prepare
+   phase_. The timing of the next Stacking cycle can be found
+   [here](https://stx.eco/dao/tools?tool=2). A release should happen
+   at least 48 hours before the start of a new cycle, to avoid interfering
+   with the prepare phase.
+
+2. Before creating the release, the release manager must determine the _version
+   number_ for this release, and create a release branch in the format: `release/signer-X.Y.Z.A.n.x`.
+   The factors that determine the version number are discussed in [Versioning](#versioning).
+
+3. _Blocking_ PRs or issues are enumerated, and a label such as `signer-X.Y.Z.A.n.x-blocker` should be applied to each.
+   The issue/PR owners for each should be pinged for updates on whether or not those issues/PRs have any blockers or are waiting on feedback.
+   __Note__: It may be necessary to cherry-pick these PRs into the target branch `release/signer-X.Y.Z.A.n.x`.
+
+4. The [CHANGELOG.md](./CHANGELOG.md) file shall be updated with summaries of what
+   was `Added`, `Changed`, and `Fixed` in the base branch. For example, pull requests
+   merged into `develop` can be found [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc).
+   Note, however, that GitHub apparently does not allow sorting by _merge time_,
+   so, when sorting by some proxy criterion, some care should be used to understand
+   which PRs were _merged_ after the last release.
+
+5. Once any blocker PRs have merged, a new tag will be created
+   by manually triggering the [`CI` GitHub Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml)
+   against the `release/signer-X.Y.Z.A.n.x` branch.
+
+6. Ecosystem participants will be notified of the release candidate in order
+   to test the release on various staging infrastructure.
+
+7. If bugs or issues emerge from the rollout on staging infrastructure, the release
+   will be delayed until those regressions are resolved.
+   As regressions are resolved, additional release candidates shall be tagged.
+
+8. Once the final release candidate has rolled out successfully without issue on staging
+   infrastructure, the tagged release shall no longer be marked as Pre-Release on the [GitHub releases](https://github.com/stacks-network/stacks-blockchain/releases/)
+   page. Announcements will then be shared in the `#stacks-core-devs` channel in the
+   Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce).
+
+9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened.
diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
new file mode 100644
index 00000000000..c35ceb67e03
--- /dev/null
+++ b/stacks-signer/src/chainstate.rs
@@ -0,0 +1,549 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::time::{Duration, UNIX_EPOCH};
+
+use blockstack_lib::chainstate::nakamoto::NakamotoBlock;
+use blockstack_lib::chainstate::stacks::TenureChangePayload;
+use blockstack_lib::net::api::getsortition::SortitionInfo;
+use blockstack_lib::util_lib::db::Error as DBError;
+use clarity::types::chainstate::BurnchainHeaderHash;
+use slog::{slog_info, slog_warn};
+use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey};
+use stacks_common::util::hash::Hash160;
+use stacks_common::{info, warn};
+
+use crate::client::{ClientError, StacksClient};
+use crate::config::SignerConfig;
+use crate::signerdb::SignerDb;
+
+#[derive(thiserror::Error, Debug)]
+/// Error type for the signer chainstate module
+pub enum SignerChainstateError {
+    /// Error resulting from database interactions
+    #[error("Database error: {0}")]
+    DBError(#[from] DBError),
+    /// Error resulting from crate::client interactions
+    #[error("Client error: {0}")]
+    ClientError(#[from] ClientError),
+}
+
+/// Captures this signer's current view of a sortition's miner.
+#[derive(PartialEq, Eq, Debug)]
+pub enum SortitionMinerStatus {
+    /// The signer thinks this sortition's miner is invalid, and hasn't signed any blocks for them.
+    InvalidatedBeforeFirstBlock,
+    /// The signer thinks this sortition's miner is invalid, but already signed one or more blocks for them.
+    InvalidatedAfterFirstBlock,
+    /// The signer thinks this sortition's miner is valid.
+    Valid,
+}
+
+/// Captures the Stacks sortition-related state for
+/// a successful sortition.
+///
+/// Sortition state in this struct is indexed using consensus hashes, and fetched from a single
+/// "get latest" RPC call to the stacks node. This ensures that the state in this struct is
+/// consistent with itself (i.e., it does not span a bitcoin fork) and up to date.
+#[derive(Debug)]
+pub struct SortitionState {
+    /// The miner's pub key hash
+    pub miner_pkh: Hash160,
+    /// If known already, the public key which hashes to `miner_pkh`
+    pub miner_pubkey: Option<StacksPublicKey>,
+    /// the last burn block in this fork which had a sortition
+    pub prior_sortition: ConsensusHash,
+    /// the committed-to parent tenure ID
+    pub parent_tenure_id: ConsensusHash,
+    /// this sortition's consensus hash
+    pub consensus_hash: ConsensusHash,
+    /// what is this signer's view of this sortition's miner? Did they misbehave?
+    pub miner_status: SortitionMinerStatus,
+    /// the timestamp in the burn block that performed this sortition
+    pub burn_header_timestamp: u64,
+    /// the burn header hash of the burn block that performed this sortition
+    pub burn_block_hash: BurnchainHeaderHash,
+}
+
+impl SortitionState {
+    /// Check if the sortition is timed out (i.e., the miner did not propose a block in time)
+    pub fn is_timed_out(
+        &self,
+        timeout: Duration,
+        signer_db: &SignerDb,
+    ) -> Result<bool, SignerChainstateError> {
+        // if the miner has already been invalidated, we don't need to check if they've timed out.
+        if self.miner_status != SortitionMinerStatus::Valid {
+            return Ok(false);
+        }
+        // if we've already signed a block in this tenure, the miner can't have timed out.
+        let has_blocks = signer_db
+            .get_last_signed_block_in_tenure(&self.consensus_hash)?
+            .is_some();
+        if has_blocks {
+            return Ok(false);
+        }
+        let Some(received_ts) = signer_db.get_burn_block_receive_time(&self.burn_block_hash)?
+        else {
+            return Ok(false);
+        };
+        let received_time = UNIX_EPOCH + Duration::from_secs(received_ts);
+        let Ok(elapsed) = std::time::SystemTime::now().duration_since(received_time) else {
+            return Ok(false);
+        };
+        if elapsed > timeout {
+            return Ok(true);
+        }
+        Ok(false)
+    }
+}
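A hedged sketch of how `is_timed_out` is consumed; this mirrors the usage in `check_proposal` further down, and the `expire_stale_miner` wrapper is illustrative rather than part of the patch:

```rust
use std::time::Duration;

// Illustrative wrapper, assuming the SortitionState / SignerDb API above.
fn expire_stale_miner(
    sortition: &mut SortitionState,
    signer_db: &SignerDb,
    timeout: Duration,
) -> Result<(), SignerChainstateError> {
    // `timeout` would come from ProposalEvalConfig::block_proposal_timeout.
    if sortition.is_timed_out(timeout, signer_db)? {
        sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock;
    }
    Ok(())
}
```

+
+/// Captures the configuration settings used by the signer when evaluating block proposals.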
+#[derive(Debug, Clone)] +pub struct ProposalEvalConfig { + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, + /// Time between processing a sortition and proposing a block before the block is considered invalid + pub block_proposal_timeout: Duration, +} + +impl From<&SignerConfig> for ProposalEvalConfig { + fn from(value: &SignerConfig) -> Self { + Self { + first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, + block_proposal_timeout: value.block_proposal_timeout, + } + } +} + +/// The signer's current view of the stacks chain's sortition +/// state +#[derive(Debug)] +pub struct SortitionsView { + /// the prior successful sortition (this corresponds to the "prior" miner slot) + pub last_sortition: Option, + /// the current successful sortition (this corresponds to the "current" miner slot) + pub cur_sortition: SortitionState, + /// the hash at which the sortitions view was fetched + pub latest_consensus_hash: ConsensusHash, + /// configuration settings for evaluating proposals + pub config: ProposalEvalConfig, +} + +impl TryFrom for SortitionState { + type Error = ClientError; + fn try_from(value: SortitionInfo) -> Result { + Ok(Self { + miner_pkh: value + .miner_pk_hash160 + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + miner_pubkey: None, + prior_sortition: value + .last_sortition_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + consensus_hash: value.consensus_hash, + parent_tenure_id: value + .stacks_parent_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + burn_header_timestamp: value.burn_header_timestamp, + burn_block_hash: value.burn_block_hash, + miner_status: SortitionMinerStatus::Valid, + }) + } +} + +enum ProposedBy<'a> { + LastSortition(&'a SortitionState), + CurrentSortition(&'a SortitionState), +} + +impl<'a> ProposedBy<'a> { + pub fn state(&self) -> &SortitionState { + match self { + ProposedBy::LastSortition(x) => x, + ProposedBy::CurrentSortition(x) => x, + } + } +} + +impl SortitionsView { + /// Apply checks from the SortitionsView on the block proposal. + pub fn check_proposal( + &mut self, + client: &StacksClient, + signer_db: &SignerDb, + block: &NakamotoBlock, + block_pk: &StacksPublicKey, + ) -> Result { + if self + .cur_sortition + .is_timed_out(self.config.block_proposal_timeout, signer_db)? + { + self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + if let Some(last_sortition) = self.last_sortition.as_mut() { + if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { + last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } + let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); + if !bitvec_all_1s { + warn!( + "Miner block proposal has bitvec field which punishes in disagreement with signer. 
Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + return Ok(false); + } + + let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); + let Some(proposed_by) = + (if block.header.consensus_hash == self.cur_sortition.consensus_hash { + Some(ProposedBy::CurrentSortition(&self.cur_sortition)) + } else { + None + }) + .or_else(|| { + self.last_sortition.as_ref().and_then(|last_sortition| { + if block.header.consensus_hash == last_sortition.consensus_hash { + Some(ProposedBy::LastSortition(last_sortition)) + } else { + None + } + }) + }) + else { + warn!( + "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + return Ok(false); + }; + + if proposed_by.state().miner_pkh != block_pkh { + warn!( + "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_pubkey" => &block_pk.to_hex(), + "proposed_block_pubkey_hash" => %block_pkh, + "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, + ); + return Ok(false); + } + + // check that this miner is the most recent sortition + match proposed_by { + ProposedBy::CurrentSortition(sortition) => { + if sortition.miner_status != SortitionMinerStatus::Valid { + warn!( + "Current miner behaved improperly, this signer views the miner as invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + ProposedBy::LastSortition(_last_sortition) => { + // should only consider blocks from the last sortition if the new sortition was invalidated + // before we signed their first block. + if self.cur_sortition.miner_status + != SortitionMinerStatus::InvalidatedBeforeFirstBlock + { + warn!( + "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + }; + + if let Some(tenure_change) = block.get_tenure_change_tx_payload() { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + let confirms_expected_parent = + Self::check_tenure_change_block_confirmation(tenure_change, block, signer_db)?; + if !confirms_expected_parent { + return Ok(false); + } + // now, we have to check if the parent tenure was a valid choice. 
+ let is_valid_parent_tenure = Self::check_parent_tenure_choice( + proposed_by.state(), + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; + if !is_valid_parent_tenure { + return Ok(false); + } + let last_in_tenure = signer_db + .get_last_signed_block_in_tenure(&block.header.consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + if let Some(last_in_tenure) = last_in_tenure { + warn!( + "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + ); + return Ok(false); + } + } else { + // check if the new block confirms the last block in the current tenure + let confirms_latest_in_tenure = + Self::confirms_known_blocks_in(block, &block.header.consensus_hash, signer_db)?; + if !confirms_latest_in_tenure { + return Ok(false); + } + } + + if let Some(tenure_extend) = block.get_tenure_extend_tx_payload() { + // in tenure extends, we need to check: + // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view + // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let changed_burn_view = + tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; + let enough_time_passed = Self::tenure_time_passed_block_lim()?; + if !changed_burn_view && !enough_time_passed { + warn!( + "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + + Ok(true) + } + + fn check_parent_tenure_choice( + sortition_state: &SortitionState, + block: &NakamotoBlock, + signer_db: &SignerDb, + client: &StacksClient, + first_proposal_burn_block_timing: &Duration, + ) -> Result { + // if the parent tenure is the last sortition, it is a valid choice. + // if the parent tenure is a reorg, then all of the reorged sortitions + // must either have produced zero blocks _or_ produced their first block + // very close to the burn block transition. + if sortition_state.prior_sortition == sortition_state.parent_tenure_id { + return Ok(true); + } + info!( + "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "sortition_state.consensus_hash" => %sortition_state.consensus_hash, + "sortition_state.prior_sortition" => %sortition_state.prior_sortition, + "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, + ); + + let tenures_reorged = client.get_tenure_forking_info( + &sortition_state.parent_tenure_id, + &sortition_state.prior_sortition, + )?; + if tenures_reorged.is_empty() { + warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. 
Marking miner invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + + // this value *should* always be some, but try to do the best we can if it isn't + let sortition_state_received_time = + signer_db.get_burn_block_receive_time(&sortition_state.burn_block_hash)?; + + for tenure in tenures_reorged.iter() { + if tenure.consensus_hash == sortition_state.parent_tenure_id { + // this was a built-upon tenure, no need to check this tenure as part of the reorg. + continue; + } + + if tenure.first_block_mined.is_some() { + let Some(local_block_info) = + signer_db.get_first_signed_block_in_tenure(&tenure.consensus_hash)? + else { + warn!( + "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks, and there is no local knowledge for that tenure's block timing."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + ); + return Ok(false); + }; + + let checked_proposal_timing = if let Some(sortition_state_received_time) = + sortition_state_received_time + { + // how long was there between when the proposal was received and the next sortition started? + let proposal_to_sortition = if let Some(signed_at) = + local_block_info.signed_self + { + sortition_state_received_time.saturating_sub(signed_at) + } else { + info!("We did not sign over the reorged tenure's first block, considering it as a late-arriving proposal"); + 0 + }; + if Duration::from_secs(proposal_to_sortition) + <= *first_proposal_burn_block_timing + { + info!( + "Miner is not building off of most recent tenure. 
A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + "violating_tenure_proposed_time" => local_block_info.proposed_time, + "new_tenure_received_time" => sortition_state_received_time, + "new_tenure_burn_timestamp" => sortition_state.burn_header_timestamp, + ); + continue; + } + true + } else { + false + }; + + warn!( + "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + "checked_proposal_timing" => checked_proposal_timing, + ); + return Ok(false); + } + } + + Ok(true) + } + + fn check_tenure_change_block_confirmation( + tenure_change: &TenureChangePayload, + block: &NakamotoBlock, + signer_db: &SignerDb, + ) -> Result { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + Self::confirms_known_blocks_in(block, &tenure_change.prev_tenure_consensus_hash, signer_db) + } + + fn confirms_known_blocks_in( + block: &NakamotoBlock, + tenure: &ConsensusHash, + signer_db: &SignerDb, + ) -> Result { + let Some(last_known_block) = signer_db + .get_last_signed_block_in_tenure(tenure) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))? + else { + info!( + "Have not signed off on any blocks in the parent tenure, assuming block confirmation is correct"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "tenure" => %tenure, + ); + return Ok(true); + }; + if block.header.chain_length > last_known_block.block.header.chain_length { + Ok(true) + } else { + warn!( + "Miner's block proposal does not confirm as many blocks as we expect"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => last_known_block.block.header.chain_length + 1, + ); + Ok(false) + } + } + + /// Has the current tenure lasted long enough to extend the block limit? + pub fn tenure_time_passed_block_lim() -> Result { + // TODO + Ok(false) + } + + /// Fetch a new view of the recent sortitions + pub fn fetch_view( + config: ProposalEvalConfig, + client: &StacksClient, + ) -> Result { + let latest_state = client.get_latest_sortition()?; + let latest_ch = latest_state.consensus_hash; + + // figure out what cur_sortition will be set to. + // if the latest sortition wasn't successful, query the last one that was. 
+ let latest_success = if latest_state.was_sortition { + latest_state + } else { + info!("Latest state wasn't a sortition: {latest_state:?}"); + let last_sortition_ch = latest_state + .last_sortition_ch + .as_ref() + .ok_or_else(|| ClientError::NoSortitionOnChain)?; + client.get_sortition(last_sortition_ch)? + }; + + // now, figure out what `last_sortition` will be set to. + let last_sortition = latest_success + .last_sortition_ch + .as_ref() + .map(|ch| client.get_sortition(ch)) + .transpose()?; + + let cur_sortition = SortitionState::try_from(latest_success)?; + let last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten(); + + let latest_consensus_hash = latest_ch; + + Ok(Self { + cur_sortition, + last_sortition, + latest_consensus_hash, + config, + }) + } +} diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 28ead30fee8..74e2cd2344c 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -14,26 +14,61 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::io::{self, Read}; -use std::net::SocketAddr; use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +use blockstack_lib::util_lib::signed_structured_data::{ + make_structured_data_domain, structured_data_message_hash, +}; use clap::{ArgAction, Parser, ValueEnum}; -use clarity::vm::types::QualifiedContractIdentifier; +use clarity::consts::CHAIN_ID_MAINNET; +use clarity::types::chainstate::StacksPublicKey; +use clarity::types::{PrivateKey, PublicKey}; +use clarity::util::hash::Sha256Sum; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::Value; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::Network; - extern crate alloc; +const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); +const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); +#[cfg(debug_assertions)] +const BUILD_TYPE: &str = "debug"; +#[cfg(not(debug_assertions))] +const BUILD_TYPE: &str = "release"; + +lazy_static! 
{ + static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + let git_branch = GIT_BRANCH.unwrap_or(""); + let git_commit = GIT_COMMIT.unwrap_or(""); + format!( + "{} ({}:{}, {} build, {} [{}])", + pkg_version, + git_branch, + git_commit, + BUILD_TYPE, + std::env::consts::OS, + std::env::consts::ARCH + ) + }; +} + #[derive(Parser, Debug)] #[command(author, version, about)] +#[command(long_version = VERSION_STRING.as_str())] + /// The CLI arguments for the stacks signer pub struct Cli { /// Subcommand action to take @@ -44,28 +79,24 @@ pub struct Cli { /// Subcommands for the stacks signer binary #[derive(clap::Subcommand, Debug)] pub enum Command { - /// Get a chunk from the stacker-db instance + /// Get a chunk from the stacker-db instance in hex encoding GetChunk(GetChunkArgs), - /// Get the latest chunk from the stacker-db instance + /// Get the latest chunk from the stacker-db instance in hex encoding GetLatestChunk(GetLatestChunkArgs), - /// List chunks from the stacker-db instance + /// List chunks from the stacker-db instance in hex encoding ListChunks(StackerDBArgs), /// Upload a chunk to the stacker-db instance PutChunk(PutChunkArgs), - /// Run DKG and sign the message through the stacker-db instance - DkgSign(SignArgs), - /// Sign the message through the stacker-db instance - Sign(SignArgs), - /// Run a DKG round through the stacker-db instance - Dkg(RunDkgArgs), /// Run the signer, waiting for events from the stacker-db instance Run(RunSignerArgs), - /// Generate necessary files for running a collection of signers - GenerateFiles(GenerateFilesArgs), /// Generate a signature for Stacking transactions GenerateStackingSignature(GenerateStackingSignatureArgs), /// Check a configuration file and output config information CheckConfig(RunSignerArgs), + /// Vote for a specified SIP with a yes or no vote + GenerateVote(GenerateVoteArgs), + /// Verify the vote for a specified SIP against a public key and vote info + VerifyVote(VerifyVoteArgs), } /// Basic arguments for all cyrptographic and stacker-db functionality @@ -127,72 +158,107 @@ pub struct PutChunkArgs { } #[derive(Parser, Debug, Clone)] -/// Arguments for the dkg-sign and sign command -pub struct SignArgs { +/// Arguments for the Run command +pub struct RunSignerArgs { /// Path to config file #[arg(long, short, value_name = "FILE")] pub config: PathBuf, - /// The reward cycle the signer is registered for and wants to sign for - /// Note: this must be the current reward cycle of the node - #[arg(long, short)] - pub reward_cycle: u64, - /// The data to sign - #[arg(required = false, value_parser = parse_data)] - // Note this weirdness is due to https://github.com/clap-rs/clap/discussions/4695 - // Need to specify the long name here due to invalid parsing in Clap which looks at the NAME rather than the TYPE which causes issues in how it handles Vec's. 
- pub data: alloc::vec::Vec, } #[derive(Parser, Debug, Clone)] -/// Arguments for the Dkg command -pub struct RunDkgArgs { - /// Path to config file +/// Arguments for the Vote command +pub struct GenerateVoteArgs { + /// Path to signer config file #[arg(long, short, value_name = "FILE")] pub config: PathBuf, - /// The reward cycle the signer is registered for and wants to peform DKG for - #[arg(long, short)] - pub reward_cycle: u64, + /// The vote info being cast + #[clap(flatten)] + pub vote_info: VoteInfo, } -#[derive(Parser, Debug, Clone)] -/// Arguments for the Run command -pub struct RunSignerArgs { - /// Path to config file - #[arg(long, short, value_name = "FILE")] - pub config: PathBuf, +#[derive(Parser, Debug, Clone, Copy)] +/// Arguments for the VerifyVote command +pub struct VerifyVoteArgs { + /// The Stacks public key to verify against + #[arg(short, long, value_parser = parse_public_key)] + pub public_key: StacksPublicKey, + /// The message signature in hexadecimal format + #[arg(short, long, value_parser = parse_message_signature)] + pub signature: MessageSignature, + /// The vote info being verified + #[clap(flatten)] + pub vote_info: VoteInfo, } -#[derive(Parser, Debug, Clone)] -/// Arguments for the generate-files command -pub struct GenerateFilesArgs { - /// The Stacks node to connect to - #[arg(long)] - pub host: SocketAddr, - #[arg( - long, - required_unless_present = "private_keys", - conflicts_with = "private_keys" - )] - /// The number of signers to generate - pub num_signers: Option, - #[clap(long, value_name = "FILE")] - /// A path to a file containing a list of hexadecimal Stacks private keys of the signers - pub private_keys: Option, - #[arg(long, value_parser = parse_network)] - /// The network to use. One of "mainnet", "testnet", or "mocknet". - pub network: Network, - /// The directory to write the test data files to - #[arg(long, default_value = ".")] - pub dir: PathBuf, - /// The number of milliseconds to wait when polling for events from the stacker-db instance. 
+#[derive(Parser, Debug, Clone, Copy)] +/// Information about a SIP vote +pub struct VoteInfo { + /// The SIP number to vote on #[arg(long)] - pub timeout: Option, - #[arg(long)] - /// The authorization password to use to connect to the validate block proposal node endpoint - pub password: String, + pub sip: u32, + /// The vote to cast + #[arg(long, value_parser = parse_vote)] + pub vote: Vote, } -#[derive(Clone, Debug)] +impl VoteInfo { + /// Get the digest to sign that authenticates this vote data + fn digest(&self) -> Sha256Sum { + let vote_message = TupleData::from_data(vec![ + ("sip".into(), Value::UInt(self.sip.into())), + ("vote".into(), Value::UInt(self.vote.to_u8().into())), + ]) + .unwrap(); + let data_domain = + make_structured_data_domain("signer-sip-voting", "1.0.0", CHAIN_ID_MAINNET); + structured_data_message_hash(vote_message.into(), data_domain) + } + + /// Sign the vote data and return the signature + pub fn sign(&self, private_key: &StacksPrivateKey) -> Result { + let digest = self.digest(); + private_key.sign(digest.as_bytes()) + } + + /// Verify the vote data against the provided public key and signature + pub fn verify( + &self, + public_key: &StacksPublicKey, + signature: &MessageSignature, + ) -> Result { + let digest = self.digest(); + public_key.verify(digest.as_bytes(), signature) + } +} + +define_u8_enum!( +/// A given vote for a SIP +Vote { + /// Vote yes + Yes = 0, + /// Vote no + No = 1 +}); + +impl TryFrom<&str> for Vote { + type Error = String; + fn try_from(input: &str) -> Result { + match input.to_lowercase().as_str() { + "yes" => Ok(Vote::Yes), + "no" => Ok(Vote::No), + _ => Err(format!("Invalid vote: {}. Must be `yes` or `no`.", input)), + } + } +} + +impl TryFrom for Vote { + type Error = String; + fn try_from(input: u8) -> Result { + Vote::from_u8(input).ok_or_else(|| format!("Invalid vote: {}. 
Must be 0 or 1.", input)) + } +} + +#[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); @@ -219,22 +285,27 @@ impl ValueEnum for StackingSignatureMethod { Self(Pox4SignatureTopic::StackStx), Self(Pox4SignatureTopic::StackExtend), Self(Pox4SignatureTopic::AggregationCommit), + Self(Pox4SignatureTopic::AggregationIncrease), + Self(Pox4SignatureTopic::StackIncrease), ] } fn from_str(input: &str, _ignore_case: bool) -> Result { let topic = match input { - "stack-stx" => Pox4SignatureTopic::StackStx, - "stack-extend" => Pox4SignatureTopic::StackExtend, "aggregation-commit" => Pox4SignatureTopic::AggregationCommit, - "agg-commit" => Pox4SignatureTopic::AggregationCommit, - _ => return Err(format!("Invalid topic: {}", input)), + "aggregation-increase" => Pox4SignatureTopic::AggregationIncrease, + method => match Pox4SignatureTopic::lookup_by_name(method) { + Some(topic) => topic, + None => { + return Err(format!("Invalid topic: {}", input)); + } + }, }; Ok(topic.into()) } } -#[derive(Parser, Debug, Clone)] +#[derive(Parser, Debug, Clone, PartialEq)] /// Arguments for the generate-stacking-signature command pub struct GenerateStackingSignatureArgs { /// BTC address used to receive rewards @@ -297,6 +368,21 @@ fn parse_private_key(private_key: &str) -> Result { StacksPrivateKey::from_hex(private_key).map_err(|e| format!("Invalid private key: {}", e)) } +/// Parse the hexadecimal Stacks public key +fn parse_public_key(public_key: &str) -> Result { + StacksPublicKey::from_hex(public_key).map_err(|e| format!("Invalid public key: {}", e)) +} + +/// Parse the vote +fn parse_vote(vote: &str) -> Result { + vote.try_into() +} + +/// Parse the hexadecimal encoded message signature +fn parse_message_signature(signature: &str) -> Result { + MessageSignature::from_hex(signature).map_err(|e| format!("Invalid message signature: {}", e)) +} + /// Parse the input data fn parse_data(data: &str) -> Result, String> { let encoded_data = if data == "-" { @@ -312,21 +398,6 @@ fn parse_data(data: &str) -> Result, String> { Ok(data) } -/// Parse the network. Must be one of "mainnet", "testnet", or "mocknet". -fn parse_network(network: &str) -> Result { - Ok(match network.to_lowercase().as_str() { - "mainnet" => Network::Mainnet, - "testnet" => Network::Testnet, - "mocknet" => Network::Mocknet, - _ => { - return Err(format!( - "Invalid network: {}. 
Must be one of \"mainnet\", \"testnet\", or \"mocknet\".", - network - )) - } - }) -} - #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::address::{PoxAddressType20, PoxAddressType32}; @@ -351,7 +422,7 @@ mod tests { } fn clarity_tuple_version(pox_addr: &PoxAddress) -> u8 { - pox_addr + *pox_addr .as_clarity_tuple() .expect("Failed to generate clarity tuple for pox address") .get("version") @@ -359,9 +430,8 @@ mod tests { .clone() .expect_buff(1) .expect("Expected version to be a u128") - .get(0) + .first() .expect("Expected version to be a uint") - .clone() } #[test] @@ -489,4 +559,40 @@ mod tests { _ => panic!("Invalid parsed address"), } } + + #[test] + fn test_parse_stacking_method() { + assert_eq!( + StackingSignatureMethod::from_str("agg-increase", true).unwrap(), + Pox4SignatureTopic::AggregationIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("agg-commit", true).unwrap(), + Pox4SignatureTopic::AggregationCommit.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-increase", true).unwrap(), + Pox4SignatureTopic::StackIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-extend", true).unwrap(), + Pox4SignatureTopic::StackExtend.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-stx", true).unwrap(), + Pox4SignatureTopic::StackStx.into() + ); + + // These don't exactly match the enum, but are accepted if passed as + // CLI args + + assert_eq!( + StackingSignatureMethod::from_str("aggregation-increase", true).unwrap(), + Pox4SignatureTopic::AggregationIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("aggregation-commit", true).unwrap(), + Pox4SignatureTopic::AggregationCommit.into() + ); + } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 87bee147507..d2afbeb1751 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -15,7 +15,7 @@ // along with this program. If not, see . 
/// The stacker db module for communicating with the stackerdb contract -mod stackerdb; +pub(crate) mod stackerdb; /// The stacks node client module for communicating with the stacks node pub(crate) mod stacks_client; @@ -34,6 +34,8 @@ use stacks_common::debug; const BACKOFF_INITIAL_INTERVAL: u64 = 128; /// Backoff timer max interval in milliseconds const BACKOFF_MAX_INTERVAL: u64 = 16384; +/// Backoff timer max elapsed seconds +const BACKOFF_MAX_ELAPSED: u64 = 5; #[derive(thiserror::Error, Debug)] /// Client error type @@ -83,6 +85,15 @@ pub enum ClientError { /// Stacks node does not support a feature we need #[error("Stacks node does not support a required feature: {0}")] UnsupportedStacksFeature(String), + /// Invalid response from the stacks node + #[error("Invalid response from the stacks node: {0}")] + InvalidResponse(String), + /// A successful sortition has not occurred yet + #[error("The Stacks chain has not processed any successful sortitions yet")] + NoSortitionOnChain, + /// A successful sortition's info response should be parseable into a SortitionState + #[error("A successful sortition's info response should be parseable into a SortitionState")] + UnexpectedSortitionInfo, } /// Retry a function F with an exponential backoff and notification on transient failure @@ -100,6 +111,7 @@ where let backoff_timer = backoff::ExponentialBackoffBuilder::new() .with_initial_interval(Duration::from_millis(BACKOFF_INITIAL_INTERVAL)) .with_max_interval(Duration::from_millis(BACKOFF_MAX_INTERVAL)) + .with_max_elapsed_time(Some(Duration::from_secs(BACKOFF_MAX_ELAPSED))) .build(); backoff::retry_notify(backoff_timer, request_fn, notify).map_err(|_| ClientError::RetryTimeout) @@ -116,6 +128,7 @@ pub(crate) mod tests { use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; + use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::TupleData; @@ -137,7 +150,6 @@ pub(crate) mod tests { use super::*; use crate::config::{GlobalConfig, SignerConfig}; - use crate::signer::SignerSlotID; pub struct MockServerClient { pub server: TcpListener, @@ -373,6 +385,7 @@ pub(crate) mod tests { unanchored_tip: None, unanchored_seq: Some(0), exit_at_block_height: None, + is_fully_synced: false, genesis_chainstate_hash: Sha256Sum::zero(), node_public_key: Some(public_key_buf), node_public_key_hash: Some(public_key_hash), @@ -398,6 +411,44 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } + /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate + pub fn build_get_medium_estimated_fee_ustx_response( + medium_estimate: u64, + ) -> (String, RPCFeeEstimateResponse) { + // Generate some random info + let fee_response = RPCFeeEstimateResponse { + estimated_cost: ExecutionCost { + write_length: thread_rng().next_u64(), + write_count: thread_rng().next_u64(), + read_length: thread_rng().next_u64(), + read_count: thread_rng().next_u64(), + runtime: thread_rng().next_u64(), + }, + estimated_cost_scalar: thread_rng().next_u64(), + cost_scalar_change_by_byte: thread_rng().next_u32() as f64, + estimations: vec![ + RPCFeeEstimate { + fee_rate: thread_rng().next_u32() as f64, + fee: thread_rng().next_u64(), + }, + RPCFeeEstimate { + fee_rate: thread_rng().next_u32() as f64, + fee: medium_estimate, + }, + RPCFeeEstimate { + 
fee_rate: thread_rng().next_u32() as f64, + fee: thread_rng().next_u64(), + }, + ], + }; + let fee_response_json = serde_json::to_string(&fee_response) + .expect("Failed to serialize fee estimate response"); + ( + format!("HTTP/1.1 200 OK\n\n{fee_response_json}"), + fee_response, + ) + } + /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config pub fn generate_signer_config( @@ -515,7 +566,10 @@ pub(crate) mod tests { nonce_timeout: config.nonce_timeout, sign_timeout: config.sign_timeout, tx_fee_ustx: config.tx_fee_ustx, + max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), + first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, + block_proposal_timeout: config.block_proposal_timeout, } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 691cde08ccb..de77ccbd72d 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -14,39 +14,45 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . // -use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; +use clarity::codec::read_next; use hashbrown::HashMap; use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use slog::{slog_debug, slog_warn}; -use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::{debug, warn}; -use super::ClientError; -use crate::client::retry_with_exponential_backoff; +use crate::client::{retry_with_exponential_backoff, ClientError}; use crate::config::SignerConfig; -use crate::signer::SignerSlotID; + +/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] +pub struct SignerSlotID(pub u32); + +impl std::fmt::Display for SignerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} /// The StackerDB client for communicating with the .signers contract -pub struct StackerDB { +#[derive(Debug)] +pub struct StackerDB { /// The stacker-db sessions for each signer set and message type. /// Maps message ID to the DB session. - signers_message_stackerdb_sessions: HashMap, + signers_message_stackerdb_sessions: HashMap, /// The private key used in all stacks node communications stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session - slot_versions: HashMap>, + slot_versions: HashMap>, /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. 
signer_slot_id: SignerSlotID, /// The reward cycle of the connecting signer reward_cycle: u64, - /// The stacker-db transaction msg session for the NEXT reward cycle - next_transaction_session: StackerDBSession, } -impl From<&SignerConfig> for StackerDB { +impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { Self::new( &config.node_host, @@ -57,7 +63,8 @@ impl From<&SignerConfig> for StackerDB { ) } } -impl StackerDB { + +impl StackerDB { /// Create a new StackerDB client pub fn new( host: &str, @@ -67,17 +74,11 @@ impl StackerDB { signer_slot_id: SignerSlotID, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); - for msg_id in MessageSlotID::ALL { - signers_message_stackerdb_sessions.insert( - *msg_id, - StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)), - ); + for msg_id in M::all() { + let session = + StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)); + signers_message_stackerdb_sessions.insert(*msg_id, session); } - let next_transaction_session = StackerDBSession::new( - host, - MessageSlotID::Transactions - .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), - ); Self { signers_message_stackerdb_sessions, @@ -85,25 +86,28 @@ impl StackerDB { slot_versions: HashMap::new(), signer_slot_id, reward_cycle, - next_transaction_session, } } /// Sends messages to the .signers stacker-db with an exponential backoff retry - pub fn send_message_with_retry( + pub fn send_message_with_retry>( &mut self, - message: SignerMessage, + message: T, ) -> Result { - let msg_id = message.msg_id(); + let msg_id = message.msg_id().ok_or_else(|| { + ClientError::PutChunkRejected( + "Tried to send a SignerMessage which does not have a corresponding .signers slot identifier".into() + ) + })?; let message_bytes = message.serialize_to_vec(); self.send_message_bytes_with_retry(&msg_id, message_bytes) } /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry + /// exponential backoff retry pub fn send_message_bytes_with_retry( &mut self, - msg_id: &MessageSlotID, + msg_id: &M, message_bytes: Vec, ) -> Result { let slot_id = self.signer_slot_id; @@ -126,11 +130,11 @@ impl StackerDB { chunk.sign(&self.stacks_private_key)?; let Some(session) = self.signers_message_stackerdb_sessions.get_mut(msg_id) else { - panic!("FATAL: would loop forever trying to send a message with ID {}, for which we don't have a session", msg_id); + panic!("FATAL: would loop forever trying to send a message with ID {msg_id:?}, for which we don't have a session"); }; debug!( - "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} to contract {:?}!\n{chunk:?}", + "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id:?} to contract {:?}!\n{chunk:?}", &session.stackerdb_contract_id ); @@ -179,70 +183,32 @@ impl StackerDB { } } - /// Get the transactions from stackerdb for the signers - fn get_transactions( - transactions_session: &mut StackerDBSession, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { + /// Get all signer messages from stackerdb for the given slot IDs + pub fn get_messages>( + session: &mut StackerDBSession, + slot_ids: &[u32], + ) -> Result, ClientError> { + let mut messages = vec![]; let send_request = || { - transactions_session - .get_latest_chunks(&signer_ids.iter().map(|id| id.0).collect::>()) + session + .get_latest_chunks(slot_ids) .map_err(backoff::Error::transient) 
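// Note: `retry_with_exponential_backoff` (defined in client/mod.rs) drives the
// closure above with a timer that starts at BACKOFF_INITIAL_INTERVAL (128 ms),
// is capped per attempt at BACKOFF_MAX_INTERVAL (16384 ms), and, with the newly
// added BACKOFF_MAX_ELAPSED bound, gives up after roughly 5 seconds in total,
// surfacing ClientError::RetryTimeout to the caller.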
}; let chunk_ack = retry_with_exponential_backoff(send_request)?; - let mut transactions = Vec::new(); for (i, chunk) in chunk_ack.iter().enumerate() { - let signer_id = *signer_ids - .get(i) - .expect("BUG: retrieved an unequal amount of chunks to requested chunks"); let Some(data) = chunk else { continue; }; - let Ok(message) = read_next::(&mut &data[..]) else { + let Ok(message) = read_next::(&mut &data[..]) else { if !data.is_empty() { warn!("Failed to deserialize chunk data into a SignerMessage"); - debug!( - "signer #{signer_id}: Failed chunk ({}): {data:?}", - &data.len(), - ); + debug!("slot #{i}: Failed chunk ({}): {data:?}", &data.len(),); } continue; }; - - let SignerMessage::Transactions(chunk_transactions) = message else { - warn!("Signer wrote an unexpected type to the transactions slot"); - continue; - }; - debug!( - "Retrieved {} transactions from signer ID {}.", - chunk_transactions.len(), - signer_id - ); - transactions.extend(chunk_transactions); + messages.push(message); } - Ok(transactions) - } - - /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions_with_retry( - &mut self, - ) -> Result, ClientError> { - let Some(transactions_session) = self - .signers_message_stackerdb_sessions - .get_mut(&MessageSlotID::Transactions) - else { - return Err(ClientError::NotConnected); - }; - Self::get_transactions(transactions_session, &[self.signer_slot_id]) - } - - /// Get the latest signer transactions from signer ids for the next reward cycle - pub fn get_next_transactions_with_retry( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); - Self::get_transactions(&mut self.next_transaction_session, signer_ids) + Ok(messages) } /// Retrieve the signer set this stackerdb client is attached to @@ -251,9 +217,14 @@ impl StackerDB { } /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&mut self) -> SignerSlotID { + pub fn get_signer_slot_id(&self) -> SignerSlotID { self.signer_slot_id } + + /// Get the session corresponding to the given message ID if it exists + pub fn get_session_mut(&mut self, msg_id: &M) -> Option<&mut StackerDBSession> { + self.signers_message_stackerdb_sessions.get_mut(msg_id) + } } #[cfg(test)] @@ -261,83 +232,54 @@ mod tests { use std::thread::spawn; use std::time::Duration; - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; + use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; + use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; - use crate::config::GlobalConfig; + use crate::config::{build_signer_config_tomls, GlobalConfig, Network}; #[test] - fn get_signer_transactions_with_retry_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); + fn send_signer_message_should_succeed() { + let signer_config = build_signer_config_tomls( + &[StacksPrivateKey::new()], + "localhost:20443", + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
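+        // The remaining positional arguments, per `build_signer_config_tomls`
+        // in config.rs: network, password, run_stamp, port_start, then
+        // max_tx_fee_ustx (Some(100_000)), tx_fee_ustx (None, so the
+        // TX_FEE_USTX default applies), and metrics_port_start (Some(9000)).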
+ &Network::Testnet, + "1234", + 16, + 3000, + Some(100_000), + None, + Some(9000), + ); + let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx.clone()]); - let message = signer_message.serialize_to_vec(); - - let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || stackerdb.get_next_transactions_with_retry(&signer_slot_ids)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - let signer_message = SignerMessage::Transactions(vec![]); - let message = signer_message.serialize_to_vec(); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let transactions = h.join().unwrap().unwrap(); - assert_eq!(transactions, vec![tx]); - } - - #[test] - fn send_signer_message_with_retry_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDB::from(&signer_config); + let header = NakamotoBlockHeader::empty(); + let mut block = NakamotoBlock { + header, + txs: vec![], + }; + let tx_merkle_root = { + let txid_vecs = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), + MerkleTree::::new(&txid_vecs).root() }; + block.header.tx_merkle_root = tx_merkle_root; - let signer_message = SignerMessage::Transactions(vec![tx]); + let block_reject = BlockRejection { + reason: "Did not like it".into(), + reason_code: RejectCode::RejectedInPriorRound, + signer_signature_hash: block.header.signer_signature_hash(), + }; + let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { accepted: true, reason: None, @@ -345,12 +287,14 @@ mod tests { code: None, }; let mock_server = mock_server_from_config(&config); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); + debug!("Spawning msg sender"); + let sender_thread = + spawn(move || stackerdb.send_message_with_retry(signer_message).unwrap()); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); let payload = 
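// Mock-server pattern used throughout these client tests: the call under test
// runs on the spawned thread above while this thread plays the stacks node,
// writing a canned HTTP response (the serialized ack below) to the socket.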
serde_json::to_string(&ack).expect("Failed to serialize ack"); response_bytes.extend(payload.as_bytes()); std::thread::sleep(Duration::from_millis(500)); write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap().unwrap()); + assert_eq!(ack, sender_thread.join().unwrap()); } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b89c5462dd7..223455c72d6 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,3 +1,4 @@ +use std::collections::VecDeque; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -26,22 +27,33 @@ use blockstack_lib::chainstate::stacks::{ TransactionSpendingCondition, TransactionVersion, }; use blockstack_lib::net::api::callreadonly::CallReadOnlyResponse; +use blockstack_lib::net::api::get_tenures_fork_info::{ + TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, +}; use blockstack_lib::net::api::getaccount::AccountEntryResponse; -use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; -use blockstack_lib::net::api::getstackers::GetStackersResponse; +use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; +use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersResponse}; +use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; +use blockstack_lib::net::api::postblock_v3; +use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; +use clarity::util::hash::to_hex; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; +use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; +use serde::Deserialize; use serde_json::json; -use slog::slog_debug; +use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use stacks_common::debug; -use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; use stacks_common::types::StacksEpochId; +use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -69,6 +81,12 @@ pub struct StacksClient { auth_password: String, } +#[derive(Deserialize)] +struct GetStackersErrorResp { + err_type: String, + err_msg: String, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -140,7 +158,7 @@ impl StacksClient { } /// Helper function that attempts to deserialize a clarity hex string as a list of signer slots and their associated number of signer slots - fn parse_signer_slots( + pub fn parse_signer_slots( &self, value: ClarityValue, ) -> Result, ClientError> { @@ -196,9 +214,46 @@ impl StacksClient { } } + /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction + pub fn get_medium_estimated_fee_ustx( + &self, + tx: &StacksTransaction, + ) -> Result { + let request = FeeRateEstimateRequestBody { + estimated_len: Some(tx.tx_len()),
transaction_payload: to_hex(&tx.payload.serialize_to_vec()), + }; + let timer = + crate::monitoring::new_rpc_call_timer(&self.fees_transaction_path(), &self.http_origin); + let send_request = || { + self.stacks_node_client + .post(self.fees_transaction_path()) + .header("Content-Type", "application/json") + .json(&request) + .send() + .map_err(backoff::Error::transient) + }; + let response = retry_with_exponential_backoff(send_request)?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + timer.stop_and_record(); + let fee_estimate_response = response.json::()?; + let fee = fee_estimate_response + .estimations + .get(1) + .map(|estimate| estimate.fee) + .ok_or_else(|| { + ClientError::UnexpectedResponseFormat( + "RPCFeeEstimateResponse missing medium fee estimate".into(), + ) + })?; + Ok(fee) + } + /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { - let pox_info = self.get_pox_data_with_retry()?; + let pox_info = self.get_pox_data()?; let burn_block_height = self.get_burn_block_height()?; let epoch_25 = pox_info @@ -227,14 +282,13 @@ impl StacksClient { } /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. - pub fn submit_block_for_validation_with_retry( - &self, - block: NakamotoBlock, - ) -> Result<(), ClientError> { + pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { let block_proposal = NakamotoBlockProposal { block, chain_id: self.chain_id, }; + let timer = + crate::monitoring::new_rpc_call_timer(&self.block_proposal_path(), &self.http_origin); let send_request = || { self.stacks_node_client .post(self.block_proposal_path()) @@ -246,6 +300,7 @@ impl StacksClient { }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -316,13 +371,109 @@ impl StacksClient { /// Retrieve the current account nonce for the provided address pub fn get_account_nonce(&self, address: &StacksAddress) -> Result { - let account_entry = self.get_account_entry_with_retry(address)?; - Ok(account_entry.nonce) + self.get_account_entry(address).map(|entry| entry.nonce) + } + + /// Get information about the tenures between `chosen_parent` and `last_sortition` + pub fn get_tenure_forking_info( + &self, + chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let mut tenures: VecDeque = + self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; + if tenures.is_empty() { + return Ok(vec![]); + } + while tenures.back().map(|x| &x.consensus_hash) != Some(chosen_parent) { + let new_start = tenures.back().ok_or_else(|| { + ClientError::InvalidResponse( + "Should have tenure data in forking info response".into(), + ) + })?; + let mut next_results = + self.get_tenure_forking_info_step(chosen_parent, &new_start.consensus_hash)?; + if next_results.pop_front().is_none() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + if next_results.is_empty() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + tenures.extend(next_results.into_iter()); + } + + Ok(tenures.into_iter().collect()) + } + + fn get_tenure_forking_info_step( + &self, + 
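+        // One page of the walk driven by `get_tenure_forking_info` above: each
+        // step fetches tenures ending at `last_sortition`, and the caller keeps
+        // moving `last_sortition` back (dropping the overlapping first entry of
+        // each new page) until a page finally reaches the requested `chosen_parent`.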
chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let send_request = || { + self.stacks_node_client + .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) + .send() + .map_err(backoff::Error::transient) + }; + let response = retry_with_exponential_backoff(send_request)?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let tenures = response.json()?; + + Ok(tenures) + } + + /// Get the sortition information for the latest sortition + pub fn get_latest_sortition(&self) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.sortition_info_path()) + .send() + .map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + + /// Get the sortition information for a given sortition + pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + let send_request = || { + self.stacks_node_client + .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) + .send() + .map_err(|e| { + warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) } /// Get the current peer info data from the stacks node - pub fn get_peer_info_with_retry(&self) -> Result { + pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); + let timer = + crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.core_info_path()) @@ -330,10 +481,11 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } - let peer_info_data = response.json::()?; + let peer_info_data = response.json::()?; Ok(peer_info_data) } @@ -365,28 +517,50 @@ impl StacksClient { } /// Get the reward set signers from the stacks node for the given reward cycle - pub fn get_reward_set_signers_with_retry( + pub fn get_reward_set_signers( &self, reward_cycle: u64, ) -> Result>, ClientError> { - debug!("Getting reward set for reward cycle {reward_cycle}..."); + let timer = crate::monitoring::new_rpc_call_timer( + &self.reward_set_path(reward_cycle), + &self.http_origin, + ); let send_request = || { - self.stacks_node_client + let response = self + .stacks_node_client .get(self.reward_set_path(reward_cycle)) .send() - .map_err(backoff::Error::transient) + .map_err(|e| backoff::Error::transient(e.into()))?; + let status = response.status(); + if status.is_success() { + return response + .json() + .map_err(|e| backoff::Error::permanent(e.into())); + } + let error_data = response.json::().map_err(|e| { + warn!("Failed to parse the GetStackers error response: {e}"); + backoff::Error::permanent(e.into()) + })?; + if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { + return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + } else { + warn!("Got error response ({status}): {}", error_data.err_msg); + return 
Err(backoff::Error::permanent(ClientError::RequestFailure( + status, + ))); + } }; - let response = retry_with_exponential_backoff(send_request)?; - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let stackers_response = response.json::()?; + let stackers_response = + retry_with_exponential_backoff::<_, ClientError, GetStackersResponse>(send_request)?; + timer.stop_and_record(); Ok(stackers_response.stacker_set.signers) } /// Retrieve the current pox data from the stacks node - pub fn get_pox_data_with_retry(&self) -> Result { + pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); + #[cfg(feature = "monitoring_prom")] + let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.pox_path()) @@ -394,6 +568,8 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + #[cfg(feature = "monitoring_prom")] + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -403,23 +579,22 @@ impl StacksClient { /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result { - let peer_info = self.get_peer_info_with_retry()?; - Ok(peer_info.burn_block_height) + self.get_peer_info().map(|info| info.burn_block_height) } /// Get the current reward cycle info from the stacks node pub fn get_current_reward_cycle_info(&self) -> Result { - let pox_data = self.get_pox_data_with_retry()?; + let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height .saturating_sub(pox_data.first_burnchain_block_height); - let reward_phase_block_length = pox_data + let reward_cycle_length = pox_data .reward_phase_block_length .saturating_add(pox_data.prepare_phase_block_length); - let reward_cycle = blocks_mined / reward_phase_block_length; + let reward_cycle = blocks_mined / reward_cycle_length; Ok(RewardCycleInfo { reward_cycle, - reward_phase_block_length, + reward_cycle_length, prepare_phase_block_length: pox_data.prepare_phase_block_length, first_burnchain_block_height: pox_data.first_burnchain_block_height, last_burnchain_block_height: pox_data.current_burnchain_block_height, @@ -427,11 +602,13 @@ impl StacksClient { } /// Helper function to retrieve the account info from the stacks node for a specific address - fn get_account_entry_with_retry( + pub fn get_account_entry( &self, address: &StacksAddress, ) -> Result { debug!("Getting account info..."); + let timer = + crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); let send_request = || { self.stacks_node_client .get(self.accounts_path(address)) @@ -439,6 +616,7 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -469,13 +647,12 @@ impl StacksClient { } /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_vote_for_aggregate_public_key( + pub fn build_unsigned_vote_for_aggregate_public_key( &self, signer_index: u32, round: u64, dkg_public_key: Point, reward_cycle: u64, - tx_fee: Option, nonce: u64, ) -> Result { debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); @@ -488,9 +665,8 @@ impl StacksClient {
ClarityValue::UInt(round as u128), ClarityValue::UInt(reward_cycle as u128), ]; - let tx_fee = tx_fee.unwrap_or(0); - Self::build_signed_contract_call_transaction( + let unsigned_tx = Self::build_unsigned_contract_call_transaction( &contract_address, contract_name, function_name, @@ -499,17 +675,33 @@ impl StacksClient { self.tx_version, self.chain_id, nonce, - tx_fee, - ) + )?; + Ok(unsigned_tx) + } + + /// Try to post a completed nakamoto block to our connected stacks-node + /// Returns `true` if the block was accepted or `false` if the block + /// was rejected. + pub fn post_block(&self, block: &NakamotoBlock) -> Result { + let response = self + .stacks_node_client + .post(format!("{}{}", self.http_origin, postblock_v3::PATH)) + .header("Content-Type", "application/octet-stream") + .body(block.serialize_to_vec()) + .send()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let post_block_resp = response.json::()?; + Ok(post_block_resp.accepted) } /// Helper function to submit a transaction to the Stacks mempool - pub fn submit_transaction_with_retry( - &self, - tx: &StacksTransaction, - ) -> Result { + pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { let txid = tx.txid(); let tx = tx.serialize_to_vec(); + let timer = + crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); let send_request = || { self.stacks_node_client .post(self.transaction_path()) @@ -522,6 +714,7 @@ impl StacksClient { }) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -550,12 +743,14 @@ impl StacksClient { let body = json!({"sender": self.stacks_address.to_string(), "arguments": args}).to_string(); let path = self.read_only_path(contract_addr, contract_name, function_name); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let response = self .stacks_node_client .post(path) .header("Content-Type", "application/json") .body(body) .send()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -597,6 +792,19 @@ impl StacksClient { format!("{}/v2/block_proposal", self.http_origin) } + fn sortition_info_path(&self) -> String { + format!("{}{RPC_SORTITION_INFO_PATH}", self.http_origin) + } + + fn tenure_forking_info_path(&self, start: &ConsensusHash, stop: &ConsensusHash) -> String { + format!( + "{}{RPC_TENURE_FORKING_INFO_PATH}/{}/{}", + self.http_origin, + start.to_hex(), + stop.to_hex() + ) + } + fn core_info_path(&self) -> String { format!("{}/v2/info", self.http_origin) } @@ -609,9 +817,13 @@ impl StacksClient { format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) } + fn fees_transaction_path(&self) -> String { + format!("{}/v2/fees/transaction", self.http_origin) + } + /// Helper function to create a stacks transaction for a modifying contract call #[allow(clippy::too_many_arguments)] - pub fn build_signed_contract_call_transaction( + pub fn build_unsigned_contract_call_transaction( contract_addr: &StacksAddress, contract_name: ContractName, function_name: ClarityName, @@ -620,7 +832,6 @@ impl StacksClient { tx_version: TransactionVersion, chain_id: u32, nonce: u64, - tx_fee: u64, ) -> Result { let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { address: *contract_addr, @@ -639,17 +850,22 @@ impl StacksClient { ); let mut 
unsigned_tx = StacksTransaction::new(tx_version, tx_auth, tx_payload); - - unsigned_tx.set_tx_fee(tx_fee); unsigned_tx.set_origin_nonce(nonce); unsigned_tx.anchor_mode = TransactionAnchorMode::Any; unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; unsigned_tx.chain_id = chain_id; + Ok(unsigned_tx) + } + /// Sign an unsigned transaction + pub fn sign_transaction( + &self, + unsigned_tx: StacksTransaction, + ) -> Result { let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); tx_signer - .sign_origin(stacks_private_key) + .sign_origin(&self.stacks_private_key) .map_err(|e| ClientError::TransactionGenerationFailure(e.to_string()))?; tx_signer @@ -662,31 +878,32 @@ impl StacksClient { #[cfg(test)] mod tests { + use std::collections::BTreeMap; use std::io::{BufWriter, Write}; use std::thread::spawn; + use blockstack_lib::burnchains::Address; use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::chainstate::stacks::boot::{ NakamotoSignerEntry, PoxStartCycleInfo, RewardSet, }; - use blockstack_lib::chainstate::stacks::ThresholdSignature; + use clarity::vm::types::{ + ListData, ListTypeData, ResponseData, SequenceData, TupleData, TupleTypeSignature, + TypeSignature, + }; use rand::thread_rng; use rand_core::RngCore; - use stacks_common::bitvec::BitVec; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; - use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; - use stacks_common::util::hash::Sha512Trunc256Sum; - use stacks_common::util::secp256k1::MessageSignature; use wsts::curve::scalar::Scalar; use super::*; use crate::client::tests::{ build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_peer_info_response, build_get_pox_data_response, - build_get_round_info_response, build_get_vote_for_aggregate_key_response, - build_get_weight_threshold_response, build_read_only_response, write_response, - MockServerClient, + build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, + build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, + build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_read_only_response, write_response, MockServerClient, }; #[test] @@ -854,12 +1071,11 @@ mod tests { assert!(result.is_err()) } - #[ignore] #[test] fn transaction_contract_call_should_send_bytes_to_node() { let mock = MockServerClient::new(); let private_key = StacksPrivateKey::new(); - let tx = StacksClient::build_signed_contract_call_transaction( + let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( &mock.client.stacks_address, ContractName::from("contract-name"), ClarityName::from("function-name"), @@ -868,10 +1084,11 @@ mod tests { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 0, - 10_000, ) .unwrap(); + let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); + let mut tx_bytes = [0u8; 1024]; { let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); @@ -889,7 +1106,7 @@ mod tests { + 1; let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction_with_retry(&tx_clone)); + let h = spawn(move || mock.client.submit_transaction(&tx_clone)); let request_bytes = write_response( mock.server, @@ -906,7 +1123,6 @@ mod tests { ); } - #[ignore] #[test] fn build_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ -917,19 +1133,17 @@ 
mod tests { let reward_cycle = thread_rng().next_u64(); let h = spawn(move || { - mock.client.build_vote_for_aggregate_public_key( + mock.client.build_unsigned_vote_for_aggregate_public_key( signer_index, round, point, reward_cycle, - None, nonce, ) }); assert!(h.join().unwrap().is_ok()); } - #[ignore] #[test] fn broadcast_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ -938,28 +1152,27 @@ mod tests { let signer_index = thread_rng().next_u32(); let round = thread_rng().next_u64(); let reward_cycle = thread_rng().next_u64(); + let unsigned_tx = mock + .client + .build_unsigned_vote_for_aggregate_public_key( + signer_index, + round, + point, + reward_cycle, + nonce, + ) + .unwrap(); + let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); + let tx_clone = tx.clone(); + let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - let h = spawn(move || { - let tx = mock - .client - .clone() - .build_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - None, - nonce, - ) - .unwrap(); - mock.client.submit_transaction_with_retry(&tx) - }); - let mock = MockServerClient::from_config(mock.config); write_response( mock.server, - b"HTTP/1.1 200 OK\n\n4e99f99bc4a05437abb8c7d0c306618f45b203196498e2ebe287f10497124958", + format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), ); - assert!(h.join().unwrap().is_ok()); + let returned_txid = h.join().unwrap().unwrap(); + + assert_eq!(returned_txid, tx.txid()); } #[test] @@ -1009,9 +1222,59 @@ mod tests { #[test] fn parse_valid_signer_slots_should_succeed() { let mock = MockServerClient::new(); - let clarity_value_hex = - "0x070b000000050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a8195196a9a7cf9c37cb13e1ed69a7bc047a84e050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a6505471146dcf722f0580911183f28bef30a8a890c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a1d7f8e3936e5da5f32982cc47f31d7df9fb1b38a0c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a126d1a814313c952e34c7840acec9211e1727fb80c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a7374ea6bb39f2e8d3d334d62b9f302a977de339a"; - let value = ClarityValue::try_deserialize_hex_untyped(clarity_value_hex).unwrap(); + + let signers = [ + "ST20SA6BAK9YFKGVWP4Z1XNMTFF04FA2E0M8YRNNQ", + "ST1JGAHRH8VEFE8QGB04H261Z52ZF62MAH40CD6ZN", + "STEQZ3HS6VJXMQSJK0PC8ZSHTZFSZCDKHA7R60XT", + "ST96T6M18C9WJMQ39HW41B7CJ88Y2WKZQ1CK330M", + "ST1SQ9TKBPEFJX39X6D6P5EFK0AMQFQHKK9R0MJFC", + ]; + + let tuple_type_signature: TupleTypeSignature = [ + (ClarityName::from("num_slots"), TypeSignature::UIntType), + (ClarityName::from("signer"), TypeSignature::PrincipalType), + ] + .into_iter() + .collect::>() + .try_into() + .unwrap(); + + let list_data: Vec<_> = signers + .into_iter() + .map(|signer| { + let principal_data = StacksAddress::from_string(signer).unwrap().into(); + + let data_map = [ + ("num-slots".into(), ClarityValue::UInt(13)), + ( + "signer".into(), + ClarityValue::Principal(PrincipalData::Standard(principal_data)), + ), + ] + .into_iter() + .collect(); + + ClarityValue::Tuple(TupleData { + type_signature: tuple_type_signature.clone(), + data_map, + }) + }) + .collect(); + + let list_type_signature = + ListTypeData::new_list(TypeSignature::TupleType(tuple_type_signature), 5).unwrap(); + + let sequence = ClarityValue::Sequence(SequenceData::List(ListData { + data: list_data, + 
type_signature: list_type_signature, + })); + + let value = ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(sequence), + }); + let signer_slots = mock.client.parse_signer_slots(value).unwrap(); assert_eq!(signer_slots.len(), 5); signer_slots @@ -1117,23 +1380,12 @@ mod tests { #[test] fn submit_block_for_validation_should_succeed() { let mock = MockServerClient::new(); - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; + let header = NakamotoBlockHeader::empty(); let block = NakamotoBlock { header, txs: vec![], }; - let h = spawn(move || mock.client.submit_block_for_validation_with_retry(block)); + let h = spawn(move || mock.client.submit_block_for_validation(block)); write_response(mock.server, b"HTTP/1.1 200 OK\n\n"); assert!(h.join().unwrap().is_ok()); } @@ -1141,23 +1393,12 @@ mod tests { #[test] fn submit_block_for_validation_should_fail() { let mock = MockServerClient::new(); - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; + let header = NakamotoBlockHeader::empty(); let block = NakamotoBlock { header, txs: vec![], }; - let h = spawn(move || mock.client.submit_block_for_validation_with_retry(block)); + let h = spawn(move || mock.client.submit_block_for_validation(block)); write_response(mock.server, b"HTTP/1.1 404 Not Found\n\n"); assert!(h.join().unwrap().is_err()); } @@ -1166,9 +1407,20 @@ mod tests { fn get_peer_info_should_succeed() { let mock = MockServerClient::new(); let (response, peer_info) = build_get_peer_info_response(None, None); - let h = spawn(move || mock.client.get_peer_info_with_retry()); + let h = spawn(move || mock.client.get_peer_info()); write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), peer_info); + let reduced_peer_info = h.join().unwrap().unwrap(); + assert_eq!( + reduced_peer_info.burn_block_height, + peer_info.burn_block_height + ); + assert_eq!(reduced_peer_info.pox_consensus, peer_info.pox_consensus); + assert_eq!( + reduced_peer_info.stacks_tip_consensus_hash, + peer_info.stacks_tip_consensus_hash + ); + assert_eq!(reduced_peer_info.stacks_tip, peer_info.stacks_tip); + assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } #[test] @@ -1207,7 +1459,7 @@ mod tests { let stackers_response_json = serde_json::to_string(&stackers_response) .expect("Failed to serialize get stacker response"); let response = format!("HTTP/1.1 200 OK\n\n{stackers_response_json}"); - let h = spawn(move || mock.client.get_reward_set_signers_with_retry(0)); + let h = spawn(move || mock.client.get_reward_set_signers(0)); write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } @@ -1262,4 +1514,27 @@ mod tests { write_response(mock.server, round_response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), weight as u128); } + + 
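The fee test that follows leans on a convention visible in `get_medium_estimated_fee_ustx`: the `/v2/fees/transaction` response carries three estimations, and the client treats the entry at index 1 as the medium estimate. A standalone sketch of just that selection step (hypothetical helper name, simplified to bare fee values):

fn pick_medium_fee(fees: &[u64]) -> Option<u64> {
    // Index 1 is the medium estimate in a three-entry estimation list
    fees.get(1).copied()
}
// e.g. pick_medium_fee(&[100, 250, 700]) == Some(250)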
#[test] + fn get_medium_estimated_fee_ustx_should_succeed() { + let mock = MockServerClient::new(); + let private_key = StacksPrivateKey::new(); + let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( + &mock.client.stacks_address, + ContractName::from("contract-name"), + ClarityName::from("function-name"), + &[], + &private_key, + TransactionVersion::Testnet, + CHAIN_ID_TESTNET, + 0, + ) + .unwrap(); + + let estimate = thread_rng().next_u64(); + let response = build_get_medium_estimated_fee_ustx_response(estimate).0; + let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); + write_response(mock.server, response.as_bytes()); + assert_eq!(h.join().unwrap().unwrap(), estimate); + } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index dc48dda27ad..66cf5a5f7d5 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -14,28 +14,30 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fmt::Display; +use std::fmt::{Debug, Display}; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::time::Duration; use blockstack_lib::chainstate::stacks::TransactionVersion; +use clarity::util::hash::to_hex; use libsigner::SignerEntries; use serde::Deserialize; use stacks_common::address::{ - AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks_common::types::PrivateKey; +use stacks_common::util::hash::Hash160; use wsts::curve::scalar::Scalar; -use crate::signer::SignerSlotID; +use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -// Default transaction fee in microstacks (if unspecificed in the config file) -// TODO: Use the fee estimation endpoint to get the default fee. +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; +// Default transaction fee to use in microstacks (if unspecified in the config file) const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] @@ -144,14 +146,21 @@ pub struct SignerConfig { pub nonce_timeout: Option, /// timeout to gather signature shares pub sign_timeout: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX. pub tx_fee_ustx: u64, + /// If set, will use the estimated fee up to this amount. + pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// The parsed configuration for the signer -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct GlobalConfig { /// endpoint to the stacks node pub node_host: String, @@ -177,12 +186,21 @@ pub struct GlobalConfig { pub nonce_timeout: Option, /// timeout to gather signature shares pub sign_timeout: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX.
pub tx_fee_ustx: u64, + /// the max STX tx fee to use in uSTX when estimating fees + pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file pub db_path: PathBuf, + /// Metrics endpoint + pub metrics_endpoint: Option, + /// How much time between the first block proposal in a tenure and the next bitcoin block + /// must pass before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// Internal struct for loading up the config file @@ -209,12 +227,22 @@ struct RawConfigFile { pub nonce_timeout_ms: Option, /// timeout in (millisecs) to gather signature shares pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX + /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX pub tx_fee_ustx: Option, + /// the max STX tx fee to use in uSTX when estimating fees. + /// If not set, will use tx_fee_ustx. + pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file or :memory: for an in-memory database pub db_path: String, + /// Metrics endpoint + pub metrics_endpoint: Option, + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing_secs: Option, + /// How much time to wait for a miner to propose a block following a sortition in milliseconds + pub block_proposal_timeout_ms: Option, } impl RawConfigFile { @@ -276,13 +304,9 @@ impl TryFrom for GlobalConfig { ) })?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); - let stacks_address = StacksAddress::from_public_keys( - raw_data.network.to_address_version(), - &AddressHashMode::SerializeP2PKH, - 1, - &vec![stacks_public_key], - ) - .ok_or(ConfigError::UnsupportedAddressVersion)?; + let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); + let stacks_address = + StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); let dkg_end_timeout = raw_data.dkg_end_timeout_ms.map(Duration::from_millis); @@ -290,8 +314,29 @@ impl TryFrom for GlobalConfig { let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); + let first_proposal_burn_block_timing = + Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); let db_path = raw_data.db_path.into(); + let metrics_endpoint = match raw_data.metrics_endpoint { + Some(endpoint) => Some( + endpoint + .to_socket_addrs() + .map_err(|_| ConfigError::BadField("endpoint".to_string(), endpoint.clone()))? 
+ .next() + .ok_or_else(|| { + ConfigError::BadField("endpoint".to_string(), endpoint.clone()) + })?, + ), + None => None, + }; + + let block_proposal_timeout = Duration::from_millis( + raw_data + .block_proposal_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -306,8 +351,12 @@ impl TryFrom for GlobalConfig { nonce_timeout, sign_timeout, tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), + max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, db_path, + metrics_endpoint, + first_proposal_burn_block_timing, + block_proposal_timeout, }) } } @@ -338,6 +387,10 @@ impl GlobalConfig { 0 => "default".to_string(), _ => (self.tx_fee_ustx as f64 / 1_000_000.0).to_string(), }; + let metrics_endpoint = match &self.metrics_endpoint { + Some(endpoint) => endpoint.to_string(), + None => "None".to_string(), + }; format!( r#" Stacks node host: {node_host} @@ -347,14 +400,18 @@ Public key: {public_key} Network: {network} Database path: {db_path} DKG transaction fee: {tx_fee} uSTX +Metrics endpoint: {metrics_endpoint} "#, node_host = self.node_host, endpoint = self.endpoint, - stacks_address = self.stacks_address.to_string(), - public_key = StacksPublicKey::from_private(&self.stacks_private_key).to_hex(), + stacks_address = self.stacks_address, + public_key = to_hex( + &StacksPublicKey::from_private(&self.stacks_private_key).to_bytes_compressed() + ), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), - tx_fee = tx_fee + tx_fee = tx_fee, + metrics_endpoint = metrics_endpoint, ) } } @@ -365,7 +422,14 @@ impl Display for GlobalConfig { } } +impl Debug for GlobalConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.config_to_log_string()) + } +} + /// Helper function for building a signer config for each provided signer private key +#[allow(clippy::too_many_arguments)] pub fn build_signer_config_tomls( stacks_private_keys: &[StacksPrivateKey], node_host: &str, @@ -374,6 +438,9 @@ pub fn build_signer_config_tomls( password: &str, run_stamp: u16, mut port_start: usize, + max_tx_fee_ustx: Option, + tx_fee_ustx: Option, + mut metrics_port_start: Option, ) -> Vec { let mut signer_config_tomls = vec![]; @@ -405,11 +472,40 @@ db_path = "{db_path}" signer_config_toml = format!( r#" {signer_config_toml} -event_timeout = {event_timeout_ms} +event_timeout = {event_timeout_ms} +"# + ) + } + + if let Some(max_tx_fee_ustx) = max_tx_fee_ustx { + signer_config_toml = format!( + r#" +{signer_config_toml} +max_tx_fee_ustx = {max_tx_fee_ustx} "# ) } + if let Some(tx_fee_ustx) = tx_fee_ustx { + signer_config_toml = format!( + r#" +{signer_config_toml} +tx_fee_ustx = {tx_fee_ustx} +"# + ) + } + + if let Some(metrics_port) = metrics_port_start { + let metrics_endpoint = format!("localhost:{}", metrics_port); + signer_config_toml = format!( + r#" +{signer_config_toml} +metrics_endpoint = "{metrics_endpoint}" +"# + ); + metrics_port_start = Some(metrics_port + 1); + } + signer_config_tomls.push(signer_config_toml); } @@ -439,22 +535,145 @@ mod tests { password, rand::random(), 3000, + None, + None, + Some(4000), ); let config = RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); assert_eq!(config.auth_password, "melon"); + assert!(config.max_tx_fee_ustx.is_none()); + assert!(config.tx_fee_ustx.is_none()); + assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); + } + + #[test] + fn 
fee_options_should_deserialize_correctly() { + let pk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let node_host = "localhost"; + let network = Network::Testnet; + let password = "melon"; + + // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + None, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert!(config.max_tx_fee_ustx.is_none()); + assert!(config.tx_fee_ustx.is_none()); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); + + // Test both max_tx_fee_ustx and tx_fee_ustx are specified + let max_tx_fee_ustx = Some(1000); + let tx_fee_ustx = Some(2000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + max_tx_fee_ustx, + tx_fee_ustx, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert_eq!(config.tx_fee_ustx, tx_fee_ustx); + + // Test only max_tx_fee_ustx is specified + let max_tx_fee_ustx = Some(1000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + max_tx_fee_ustx, + None, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert!(config.tx_fee_ustx.is_none()); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); + assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); + + // Test only tx_fee_ustx is specified + let tx_fee_ustx = Some(1000); + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + tx_fee_ustx, + None, + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(config.tx_fee_ustx, tx_fee_ustx); + + let config = GlobalConfig::try_from(config).expect("Failed to parse config"); + assert!(config.max_tx_fee_ustx.is_none()); + assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); } #[test] fn test_config_to_string() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let config_str = config.config_to_log_string(); - assert_eq!( - config_str, - format!( - r#" + + let expected_str_v4 = r#" +Stacks node host: 127.0.0.1:20443 +Signer endpoint: 127.0.0.1:30000 +Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ +Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 +Network: testnet +Database path: :memory: +DKG transaction fee: 0.01 uSTX +Metrics endpoint: 0.0.0.0:9090 +"#; + + let expected_str_v6 = r#" Stacks node host: 127.0.0.1:20443 Signer endpoint: [::1]:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ @@ -462,8 +681,52 @@ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: DKG transaction fee: 0.01 uSTX -"# - ) +Metrics endpoint: 0.0.0.0:9090 
+"#; + + assert!( + config_str == expected_str_v4 || config_str == expected_str_v6, + "Config string does not match expected output. Actual:\n{}", + config_str + ); + } + + #[test] + // Test the same private key twice, with and without a compression flag. + // Ensure that the address is the same in both cases. + fn test_stacks_addr_from_priv_key() { + // 64 bytes, no compression flag + let sk_hex = "2de4e77aab89c0c2570bb8bb90824f5cf2a5204a975905fee450ff9dad0fcf28"; + + let expected_addr = "SP1286C62P3TAWVQV2VM2CEGTRBQZSZ6MHMS9RW05"; + + let config_toml = format!( + r#" +stacks_private_key = "{sk_hex}" +node_host = "localhost" +endpoint = "localhost:30000" +network = "mainnet" +auth_password = "abcd" +db_path = ":memory:" + "# + ); + let config = GlobalConfig::load_from_str(&config_toml).unwrap(); + assert_eq!(config.stacks_address.to_string(), expected_addr); + + // 65 bytes (with compression flag) + let sk_hex = "2de4e77aab89c0c2570bb8bb90824f5cf2a5204a975905fee450ff9dad0fcf2801"; + + let config_toml = format!( + r#" +stacks_private_key = "{sk_hex}" +node_host = "localhost" +endpoint = "localhost:30000" +network = "mainnet" +auth_password = "abcd" +db_path = ":memory:" + "# ); + let config = GlobalConfig::load_from_str(&config_toml).unwrap(); + assert_eq!(config.stacks_address.to_string(), expected_addr); } } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9dcd0a069f9..2cbdc579c92 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -1,6 +1,6 @@ #![forbid(missing_docs)] /*! -# stacks-signer: a libary for creating a Stacks compliant signer. A default implementation binary is also provided. +# stacks-signer: a library for creating a Stacks compliant signer. A default implementation binary is also provided. Usage documentation can be found in the [README](https://github.com/Trust-Machines/core-eng/stacks-signer-api/README.md). */ @@ -20,17 +20,132 @@ Usage documentation can be found in the [README](https://github.com/Trust-Machin // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/// This module stores chainstate information about Stacks, SortitionDB for +/// tracking by the signer. +pub mod chainstate; /// The cli module for the signer binary pub mod cli; /// The signer client for communicating with stackerdb/stacks nodes pub mod client; /// The configuration module for the signer pub mod config; -/// The coordinator selector for the signer -pub mod coordinator; +/// The monitoring server for the signer +pub mod monitoring; /// The primary runloop for the signer pub mod runloop; -/// The signer module for processing events -pub mod signer; -/// The state module for the signer +pub mod signerdb; +/// The v0 implementation of the signer. This does not include WSTS support +pub mod v0; +/// The v1 implementation of the signer.
This includes WSTS support +pub mod v1; + +#[cfg(test)] +mod tests; + +use std::fmt::{Debug, Display}; +use std::sync::mpsc::{channel, Receiver, Sender}; + +use chainstate::SortitionsView; +use config::GlobalConfig; +use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; +use runloop::SignerResult; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; + +use crate::client::StacksClient; +use crate::config::SignerConfig; +use crate::runloop::{RunLoop, RunLoopCommand}; + +/// A trait which provides a common `Signer` interface for `v0` and `v1` +pub trait Signer: Debug + Display { + /// Create a new `Signer` instance + fn new(config: SignerConfig) -> Self; + /// Get the reward cycle of the signer + fn reward_cycle(&self) -> u64; + /// Process an event + fn process_event( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option, + event: Option<&SignerEvent>, + res: Sender>, + current_reward_cycle: u64, + ); + /// Process a command + fn process_command( + &mut self, + stacks_client: &StacksClient, + current_reward_cycle: u64, + command: Option, + ); + /// Check if the signer is in the middle of processing blocks + fn has_pending_blocks(&self) -> bool; +} + +/// A wrapper around the running signer type for the signer +pub type RunningSigner = libsigner::RunningSigner, Vec, T>; + +/// The wrapper for the runloop signer type +type RunLoopSigner = + libsigner::Signer, RunLoop, SignerEventReceiver, T>; + +/// The spawned signer +pub struct SpawnedSigner + Send, T: SignerEventTrait> { + /// The underlying running signer thread handle + running_signer: RunningSigner, + /// The command sender for interacting with the running signer + pub cmd_send: Sender, + /// The result receiver for interacting with the running signer + pub res_recv: Receiver>, + /// The spawned signer's config + pub config: GlobalConfig, + /// Phantom data for the signer type + _phantom: std::marker::PhantomData, +} + +impl + Send, T: SignerEventTrait> SpawnedSigner { + /// Stop the signer thread and return the final state + pub fn stop(self) -> Option> { + self.running_signer.stop() + } + + /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. + pub fn join(self) -> Option> { + self.running_signer.join() + } +} + +impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner { + /// Create a new spawned signer + pub fn new(config: GlobalConfig) -> Self { + let endpoint = config.endpoint; + info!("Starting signer with config: {:?}", config); + warn!( + "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. \ + It's important to exercise caution if you are communicating with an external node, \ + as this could potentially expose sensitive data or functionalities to security risks \ + if additional proper security checks are not integrated in place. \ + For more information, check the documentation at \ + https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." 
+ ); + let (cmd_send, cmd_recv) = channel(); + let (res_send, res_recv) = channel(); + let ev = SignerEventReceiver::new(config.network.is_mainnet()); + #[cfg(feature = "monitoring_prom")] + { + crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); + } + let runloop = RunLoop::new(config.clone()); + let mut signer: RunLoopSigner = + libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); + SpawnedSigner { + running_signer, + cmd_send, + res_recv, + _phantom: std::marker::PhantomData, + config, + } + } +} diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 34a9f62dc33..184876373bd 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,40 +26,26 @@ extern crate serde; extern crate serde_json; extern crate toml; -use std::fs::File; -use std::io::{self, BufRead, Write}; -use std::path::{Path, PathBuf}; -use std::sync::mpsc::{channel, Receiver, Sender}; -use std::time::Duration; +use std::io::{self, Write}; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; +use clarity::types::chainstate::StacksPublicKey; use clarity::vm::types::QualifiedContractIdentifier; -use libsigner::{RunningSigner, Signer, SignerEventReceiver, SignerSession, StackerDBSession}; +use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error, slog_info}; -use stacks_common::codec::read_next; -use stacks_common::types::chainstate::StacksPrivateKey; +use slog::slog_debug; +use stacks_common::debug; use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks_common::{debug, error, info}; +use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::cli::{ - Cli, Command, GenerateFilesArgs, GenerateStackingSignatureArgs, GetChunkArgs, - GetLatestChunkArgs, PutChunkArgs, RunDkgArgs, RunSignerArgs, SignArgs, StackerDBArgs, + Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, + GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::config::{build_signer_config_tomls, GlobalConfig}; -use stacks_signer::runloop::{RunLoop, RunLoopCommand}; -use stacks_signer::signer::Command as SignerCommand; +use stacks_signer::config::GlobalConfig; +use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; -use wsts::state_machine::OperationResult; - -struct SpawnedSigner { - running_signer: RunningSigner>, - cmd_send: Sender, - res_recv: Receiver>, -} /// Create a new stacker db session fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { @@ -71,90 +57,15 @@ fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> Stack /// Write the chunk to stdout fn write_chunk_to_stdout(chunk_opt: Option>) { if let Some(chunk) = chunk_opt.as_ref() { - let bytes = io::stdout().write(chunk).unwrap(); - if bytes < chunk.len() { + let hexed_string = to_hex(chunk); + let hexed_chunk = hexed_string.as_bytes(); + let bytes = io::stdout().write(hexed_chunk).unwrap(); + if bytes < hexed_chunk.len() { print!( "Failed to write complete chunk to stdout. 
Missing {} bytes", - chunk.len() - bytes - ); - } - } -} - -// Spawn a running signer and return its handle, command sender, and result receiver -fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { - let config = GlobalConfig::try_from(path).unwrap(); - let endpoint = config.endpoint; - info!("Starting signer with config: {}", config); - let (cmd_send, cmd_recv) = channel(); - let (res_send, res_recv) = channel(); - let ev = SignerEventReceiver::new(config.network.is_mainnet()); - let runloop = RunLoop::from(config); - let mut signer: Signer, RunLoop, SignerEventReceiver> = - Signer::new(runloop, ev, cmd_recv, res_send); - let running_signer = signer.spawn(endpoint).unwrap(); - SpawnedSigner { - running_signer, - cmd_send, - res_recv, - } -} - -// Process a DKG result -fn process_dkg_result(dkg_res: &[OperationResult]) { - assert!(dkg_res.len() == 1, "Received unexpected number of results"); - let dkg = dkg_res.first().unwrap(); - match dkg { - OperationResult::Dkg(aggregate_key) => { - println!("Received aggregate group key: {aggregate_key}"); - } - OperationResult::Sign(signature) => { - panic!( - "Received unexpected signature ({},{})", - &signature.R, &signature.z, - ); - } - OperationResult::SignTaproot(schnorr_proof) => { - panic!( - "Received unexpected schnorr proof ({},{})", - &schnorr_proof.r, &schnorr_proof.s, - ); - } - OperationResult::DkgError(dkg_error) => { - panic!("Received DkgError {}", dkg_error); - } - OperationResult::SignError(sign_error) => { - panic!("Received SignError {}", sign_error); - } - } -} - -// Process a Sign result -fn process_sign_result(sign_res: &[OperationResult]) { - assert!(sign_res.len() == 1, "Received unexpected number of results"); - let sign = sign_res.first().unwrap(); - match sign { - OperationResult::Dkg(aggregate_key) => { - panic!("Received unexpected aggregate group key: {aggregate_key}"); - } - OperationResult::Sign(signature) => { - panic!( - "Received bood signature ({},{})", - &signature.R, &signature.z, + hexed_chunk.len() - bytes ); } - OperationResult::SignTaproot(schnorr_proof) => { - panic!( - "Received unexpected schnorr proof ({},{})", - &schnorr_proof.r, &schnorr_proof.s, - ); - } - OperationResult::DkgError(dkg_error) => { - panic!("Received DkgError {}", dkg_error); - } - OperationResult::SignError(sign_error) => { - panic!("Received SignError {}", sign_error); - } } } @@ -176,7 +87,9 @@ fn handle_list_chunks(args: StackerDBArgs) { debug!("Listing chunks..."); let mut session = stackerdb_session(&args.host, args.contract); let chunk_list = session.list_chunks().unwrap(); - println!("{}", serde_json::to_string(&chunk_list).unwrap()); + let chunk_list_json = serde_json::to_string(&chunk_list).unwrap(); + let hexed_json = to_hex(chunk_list_json.as_bytes()); + println!("{}", hexed_json); } fn handle_put_chunk(args: PutChunkArgs) { @@ -188,118 +101,13 @@ fn handle_put_chunk(args: PutChunkArgs) { println!("{}", serde_json::to_string(&chunk_ack).unwrap()); } -fn handle_dkg(args: RunDkgArgs) { - debug!("Running DKG..."); - let spawned_signer = spawn_running_signer(&args.config); - let dkg_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Dkg, - }; - spawned_signer.cmd_send.send(dkg_command).unwrap(); - let dkg_res = spawned_signer.res_recv.recv().unwrap(); - process_dkg_result(&dkg_res); - spawned_signer.running_signer.stop(); -} - -fn handle_sign(args: SignArgs) { - debug!("Signing message..."); - let spawned_signer = spawn_running_signer(&args.config); - let Some(block) = 
read_next::(&mut &args.data[..]).ok() else { - error!("Unable to parse provided message as a NakamotoBlock."); - spawned_signer.running_signer.stop(); - return; - }; - let sign_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Sign { - block, - is_taproot: false, - merkle_root: None, - }, - }; - spawned_signer.cmd_send.send(sign_command).unwrap(); - let sign_res = spawned_signer.res_recv.recv().unwrap(); - process_sign_result(&sign_res); - spawned_signer.running_signer.stop(); -} - -fn handle_dkg_sign(args: SignArgs) { - debug!("Running DKG and signing message..."); - let spawned_signer = spawn_running_signer(&args.config); - let Some(block) = read_next::(&mut &args.data[..]).ok() else { - error!("Unable to parse provided message as a NakamotoBlock."); - spawned_signer.running_signer.stop(); - return; - }; - let dkg_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Dkg, - }; - let sign_command = RunLoopCommand { - reward_cycle: args.reward_cycle, - command: SignerCommand::Sign { - block, - is_taproot: false, - merkle_root: None, - }, - }; - // First execute DKG, then sign - spawned_signer.cmd_send.send(dkg_command).unwrap(); - spawned_signer.cmd_send.send(sign_command).unwrap(); - let dkg_res = spawned_signer.res_recv.recv().unwrap(); - process_dkg_result(&dkg_res); - let sign_res = spawned_signer.res_recv.recv().unwrap(); - process_sign_result(&sign_res); - spawned_signer.running_signer.stop(); -} - fn handle_run(args: RunSignerArgs) { debug!("Running signer..."); - let spawned_signer = spawn_running_signer(&args.config); + let config = GlobalConfig::try_from(&args.config).unwrap(); + let spawned_signer = SpawnedSigner::new(config); println!("Signer spawned successfully. Waiting for messages to process..."); // Wait for the spawned signer to stop (will only occur if an error occurs) - let _ = spawned_signer.running_signer.join(); -} - -fn handle_generate_files(args: GenerateFilesArgs) { - debug!("Generating files..."); - let signer_stacks_private_keys = if let Some(path) = args.private_keys { - let file = File::open(path).unwrap(); - let reader = io::BufReader::new(file); - - let private_keys: Vec = reader.lines().collect::>().unwrap(); - println!("{}", StacksPrivateKey::new().to_hex()); - let private_keys = private_keys - .iter() - .map(|key| StacksPrivateKey::from_hex(key).expect("Failed to parse private key.")) - .collect::>(); - if private_keys.is_empty() { - panic!("Private keys file is empty."); - } - private_keys - } else { - let num_signers = args.num_signers.unwrap(); - if num_signers == 0 { - panic!("--num-signers must be non-zero."); - } - (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>() - }; - - let signer_config_tomls = build_signer_config_tomls( - &signer_stacks_private_keys, - &args.host.to_string(), - args.timeout.map(Duration::from_millis), - &args.network, - &args.password, - rand::random(), - 3000, - ); - debug!("Built {:?} signer config tomls.", signer_config_tomls.len()); - for (i, file_contents) in signer_config_tomls.iter().enumerate() { - write_file(&args.dir, &format!("signer-{}.toml", i), file_contents); - } + let _ = spawned_signer.join(); } fn handle_generate_stacking_signature( @@ -309,7 +117,8 @@ fn handle_generate_stacking_signature( let config = GlobalConfig::try_from(&args.config).unwrap(); let private_key = config.stacks_private_key; - let public_key = Secp256k1PublicKey::from_private(&private_key); + let public_key = 
StacksPublicKey::from_private(&private_key); + let pk_hex = to_hex(&public_key.to_bytes_compressed()); let signature = make_pox_4_signer_key_signature( &args.pox_address, @@ -325,7 +134,7 @@ fn handle_generate_stacking_signature( let output_str = if args.json { serde_json::to_string(&serde_json::json!({ - "signerKey": to_hex(&public_key.to_bytes_compressed()), + "signerKey": pk_hex, "signerSignature": to_hex(signature.to_rsv().as_slice()), "authId": format!("{}", args.auth_id), "rewardCycle": args.reward_cycle, @@ -338,7 +147,7 @@ fn handle_generate_stacking_signature( } else { format!( "Signer Public Key: 0x{}\nSigner Key Signature: 0x{}\n\n", - to_hex(&public_key.to_bytes_compressed()), + pk_hex, to_hex(signature.to_rsv().as_slice()) // RSV is needed for Clarity ) }; @@ -355,13 +164,28 @@ fn handle_check_config(args: RunSignerArgs) { println!("Config: {}", config); } -/// Helper function for writing the given contents to filename in the given directory -fn write_file(dir: &Path, filename: &str, contents: &str) { - let file_path = dir.join(filename); - let filename = file_path.to_str().unwrap(); - let mut file = File::create(filename).unwrap(); - file.write_all(contents.as_bytes()).unwrap(); - println!("Created file: {}", filename); +fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { + let config = GlobalConfig::try_from(&args.config).unwrap(); + let message_signature = args.vote_info.sign(&config.stacks_private_key).unwrap(); + if do_print { + println!("{}", to_hex(message_signature.as_bytes())); + } + message_signature +} + +fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { + let valid_vote = args + .vote_info + .verify(&args.public_key, &args.signature) + .unwrap(); + if do_print { + if valid_vote { + println!("Valid vote"); + } else { + println!("Invalid vote"); + } + } + valid_vote } fn main() { @@ -385,27 +209,21 @@ fn main() { Command::PutChunk(args) => { handle_put_chunk(args); } - Command::Dkg(args) => { - handle_dkg(args); - } - Command::DkgSign(args) => { - handle_dkg_sign(args); - } - Command::Sign(args) => { - handle_sign(args); - } Command::Run(args) => { handle_run(args); } - Command::GenerateFiles(args) => { - handle_generate_files(args); - } Command::GenerateStackingSignature(args) => { handle_generate_stacking_signature(args, true); } Command::CheckConfig(args) => { handle_check_config(args); } + Command::GenerateVote(args) => { + handle_generate_vote(args, true); + } + Command::VerifyVote(args) => { + handle_verify_vote(args, true); + } } } @@ -416,15 +234,18 @@ pub mod tests { use blockstack_lib::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_message_hash, Pox4SignatureTopic, }; + use clarity::util::secp256k1::Secp256k1PrivateKey; use clarity::vm::{execute_v2, Value}; + use rand::{Rng, RngCore}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::PublicKey; use stacks_common::util::secp256k1::Secp256k1PublicKey; - use stacks_signer::cli::parse_pox_addr; + use stacks_signer::cli::{parse_pox_addr, VerifyVoteArgs, Vote, VoteInfo}; use super::{handle_generate_stacking_signature, *}; use crate::{GenerateStackingSignatureArgs, GlobalConfig}; + #[allow(clippy::too_many_arguments)] fn call_verify_signer_sig( pox_addr: &PoxAddress, reward_cycle: u128, @@ -549,4 +370,80 @@ pub mod tests { assert!(verify_result.is_ok()); assert!(verify_result.unwrap()); } + + #[test] + fn test_vote() { + let mut rand = rand::thread_rng(); + let vote_info = VoteInfo { + vote: 
rand.gen_range(0..2).try_into().unwrap(), + sip: rand.next_u32(), + }; + let config_file = "./src/tests/conf/signer-0.toml"; + let config = GlobalConfig::load_from_file(config_file).unwrap(); + let private_key = config.stacks_private_key; + let public_key = StacksPublicKey::from_private(&private_key); + let args = GenerateVoteArgs { + config: config_file.into(), + vote_info, + }; + let message_signature = handle_generate_vote(args, false); + assert!( + vote_info.verify(&public_key, &message_signature).unwrap(), + "Vote should be valid" + ); + } + + #[test] + fn test_verify_vote() { + let mut rand = rand::thread_rng(); + let private_key = Secp256k1PrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); + + let invalid_private_key = Secp256k1PrivateKey::new(); + let invalid_public_key = StacksPublicKey::from_private(&invalid_private_key); + + let sip = rand.next_u32(); + let vote_info = VoteInfo { + vote: Vote::No, + sip, + }; + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info, + }; + let valid = handle_verify_vote(args, false); + assert!(valid, "Vote should be valid"); + + let args = VerifyVoteArgs { + public_key: invalid_public_key, + signature: vote_info.sign(&private_key).unwrap(), // Invalid corresponding public key + vote_info, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info: VoteInfo { + vote: Vote::Yes, // Invalid vote + sip, + }, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info: VoteInfo { + vote: Vote::No, + sip: sip.wrapping_add(1), // Invalid sip number + }, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + } } diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs new file mode 100644 index 00000000000..0ecc99b5f85 --- /dev/null +++ b/stacks-signer/src/monitoring/mod.rs @@ -0,0 +1,188 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
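+// Most of the public helpers below follow the same pattern: the function
+// itself always exists, but its body only does work when the
+// `monitoring_prom` feature is compiled in, so call sites never need their
+// own `#[cfg]` guards. A minimal sketch of the pattern (gauge name and
+// helper are illustrative only, not part of this module):
+//
+//     #[allow(unused_variables)]
+//     pub fn update_some_gauge(value: i64) {
+//         #[cfg(feature = "monitoring_prom")]
+//         prometheus::SOME_GAUGE.set(value);
+//     }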
+ +#[cfg(feature = "monitoring_prom")] +use ::prometheus::HistogramTimer; +#[cfg(feature = "monitoring_prom")] +use slog::slog_error; +#[cfg(not(feature = "monitoring_prom"))] +use slog::slog_warn; +#[cfg(feature = "monitoring_prom")] +use stacks_common::error; +#[cfg(not(feature = "monitoring_prom"))] +use stacks_common::warn; + +use crate::config::GlobalConfig; + +#[cfg(feature = "monitoring_prom")] +mod prometheus; + +#[cfg(feature = "monitoring_prom")] +mod server; + +/// Update stacks tip height gauge +#[allow(unused_variables)] +pub fn update_stacks_tip_height(height: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::STACKS_TIP_HEIGHT_GAUGE.set(height); +} + +/// Update the current reward cycle +#[allow(unused_variables)] +pub fn update_reward_cycle(reward_cycle: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::CURRENT_REWARD_CYCLE.set(reward_cycle); +} + +/// Increment the block validation responses counter +#[allow(unused_variables)] +pub fn increment_block_validation_responses(accepted: bool) { + #[cfg(feature = "monitoring_prom")] + { + let label_value = if accepted { "accepted" } else { "rejected" }; + prometheus::BLOCK_VALIDATION_RESPONSES + .with_label_values(&[label_value]) + .inc(); + } +} + +/// Increment the block responses sent counter +#[allow(unused_variables)] +pub fn increment_block_responses_sent(accepted: bool) { + #[cfg(feature = "monitoring_prom")] + { + let label_value = if accepted { "accepted" } else { "rejected" }; + prometheus::BLOCK_RESPONSES_SENT + .with_label_values(&[label_value]) + .inc(); + } +} + +/// Increment the signer inbound messages counter +#[allow(unused_variables)] +pub fn increment_signer_inbound_messages(amount: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_INBOUND_MESSAGES.inc_by(amount); +} + +/// Increment the coordinator inbound messages counter +#[allow(unused_variables)] +pub fn increment_coordinator_inbound_messages(amount: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::COORDINATOR_INBOUND_MESSAGES.inc_by(amount); +} + +/// Increment the number of inbound packets received +#[allow(unused_variables)] +pub fn increment_inbound_packets(amount: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::INBOUND_PACKETS_RECEIVED.inc_by(amount); +} + +/// Increment the number of commands processed +#[allow(unused_variables)] +pub fn increment_commands_processed(command_type: &str) { + #[cfg(feature = "monitoring_prom")] + prometheus::COMMANDS_PROCESSED + .with_label_values(&[command_type]) + .inc(); +} + +/// Increment the number of DKG votes submitted +#[allow(unused_variables)] +pub fn increment_dkg_votes_submitted() { + #[cfg(feature = "monitoring_prom")] + prometheus::DGK_VOTES_SUBMITTED.inc(); +} + +/// Increment the number of commands processed +#[allow(unused_variables)] +pub fn increment_operation_results(operation_type: &str) { + #[cfg(feature = "monitoring_prom")] + prometheus::OPERATION_RESULTS + .with_label_values(&[operation_type]) + .inc(); +} + +/// Increment the number of block proposals received +#[allow(unused_variables)] +pub fn increment_block_proposals_received() { + #[cfg(feature = "monitoring_prom")] + prometheus::BLOCK_PROPOSALS_RECEIVED.inc(); +} + +/// Update the stx balance of the signer +#[allow(unused_variables)] +pub fn update_signer_stx_balance(balance: i64) { + #[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_STX_BALANCE.set(balance); +} + +/// Update the signer nonce metric +#[allow(unused_variables)] +pub fn update_signer_nonce(nonce: u64) { + 
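+    // On-chain nonces are u64 but Prometheus gauges are i64, so the `as`
+    // cast below would wrap for nonces above i64::MAX (not expected in
+    // practice).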
#[cfg(feature = "monitoring_prom")] + prometheus::SIGNER_NONCE.set(nonce as i64); +} + +/// Start a new RPC call timer. +/// The `origin` parameter is the base path of the RPC call, e.g. `http://node.com`. +/// The `origin` parameter is removed from `full_path` when storing in prometheus. +#[cfg(feature = "monitoring_prom")] +pub fn new_rpc_call_timer(full_path: &str, origin: &str) -> HistogramTimer { + let path = &full_path[origin.len()..]; + let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[path]); + histogram.start_timer() +} + +/// NoOp timer uses for monitoring when the monitoring feature is not enabled. +pub struct NoOpTimer; +impl NoOpTimer { + /// NoOp method to stop recording when the monitoring feature is not enabled. + pub fn stop_and_record(&self) {} +} + +/// Stop and record the no-op timer. +#[cfg(not(feature = "monitoring_prom"))] +pub fn new_rpc_call_timer(_full_path: &str, _origin: &str) -> NoOpTimer { + NoOpTimer +} + +/// Start serving monitoring metrics. +/// This will only serve the metrics if the `monitoring_prom` feature is enabled. +#[allow(unused_variables)] +pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> { + #[cfg(feature = "monitoring_prom")] + { + if config.metrics_endpoint.is_none() { + return Ok(()); + } + let thread = std::thread::Builder::new() + .name("signer_metrics".to_string()) + .spawn(move || { + if let Err(monitoring_err) = server::MonitoringServer::start(&config) { + error!("Monitoring: Error in metrics server: {:?}", monitoring_err); + } + }); + } + #[cfg(not(feature = "monitoring_prom"))] + { + if config.metrics_endpoint.is_some() { + warn!("Not starting monitoring metrics server as the monitoring_prom feature is not enabled"); + } + } + Ok(()) +} diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs new file mode 100644 index 00000000000..c78db1299d7 --- /dev/null +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -0,0 +1,105 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use lazy_static::lazy_static; +use prometheus::{ + gather, histogram_opts, opts, register_histogram_vec, register_int_counter, + register_int_counter_vec, register_int_gauge, Encoder, HistogramVec, IntCounter, IntCounterVec, + IntGauge, TextEncoder, +}; + +lazy_static! { + pub static ref STACKS_TIP_HEIGHT_GAUGE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_stacks_node_height", + "The current height of the Stacks node" + )) + .unwrap(); + pub static ref BLOCK_VALIDATION_RESPONSES: IntCounterVec = register_int_counter_vec!( + "stacks_signer_block_validation_responses", + "The number of block validation responses. 
`response_type` is either 'accepted' or 'rejected'", + &["response_type"] + ) + .unwrap(); + pub static ref BLOCK_RESPONSES_SENT: IntCounterVec = register_int_counter_vec!( + "stacks_signer_block_responses_sent", + "The number of block responses sent. `response_type` is either 'accepted' or 'rejected'", + &["response_type"] + ) + .unwrap(); + pub static ref SIGNER_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( + "stacks_signer_inbound_messages", + "The number of inbound messages received by the signer" + )) + .unwrap(); + pub static ref COORDINATOR_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( + "stacks_signer_coordinator_inbound_messages", + "The number of inbound messages received as a coordinator" + )) + .unwrap(); + pub static ref INBOUND_PACKETS_RECEIVED: IntCounter = register_int_counter!(opts!( + "stacks_signer_inbound_packets_received", + "The number of inbound packets received by the signer" + )) + .unwrap(); + pub static ref COMMANDS_PROCESSED: IntCounterVec = register_int_counter_vec!( + "stacks_signer_commands_processed", + "The number of commands processed by the signer", + &["command_type"] + ) + .unwrap(); + pub static ref DGK_VOTES_SUBMITTED: IntCounter = register_int_counter!(opts!( + "stacks_signer_dgk_votes_submitted", + "The number of DGK votes submitted by the signer" + )) + .unwrap(); + pub static ref OPERATION_RESULTS: IntCounterVec = register_int_counter_vec!( + "stacks_signer_operation_results_dkg", + "The number of DKG operation results", + &["operation_type"] + ) + .unwrap(); + pub static ref BLOCK_PROPOSALS_RECEIVED: IntCounter = register_int_counter!(opts!( + "stacks_signer_block_proposals_received", + "The number of block proposals received by the signer" + )) + .unwrap(); + pub static ref CURRENT_REWARD_CYCLE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_current_reward_cycle", + "The current reward cycle" + )).unwrap(); + pub static ref SIGNER_STX_BALANCE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_stx_balance", + "The current STX balance of the signer" + )).unwrap(); + pub static ref SIGNER_NONCE: IntGauge = register_int_gauge!(opts!( + "stacks_signer_nonce", + "The current nonce of the signer" + )).unwrap(); + + pub static ref SIGNER_RPC_CALL_LATENCIES_HISTOGRAM: HistogramVec = register_histogram_vec!(histogram_opts!( + "stacks_signer_node_rpc_call_latencies_histogram", + "Time (seconds) measuring round-trip RPC call latency to the Stacks node" + // Will use DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0] by default + ), &["path"]).unwrap(); +} + +pub fn gather_metrics_string() -> String { + let mut buffer = Vec::new(); + let encoder = TextEncoder::new(); + let metrics_families = gather(); + encoder.encode(&metrics_families, &mut buffer).unwrap(); + String::from_utf8(buffer).unwrap() +} diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs new file mode 100644 index 00000000000..ffde008c9ff --- /dev/null +++ b/stacks-signer/src/monitoring/server.rs @@ -0,0 +1,250 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::SocketAddr; +use std::time::Instant; + +use clarity::util::hash::to_hex; +use clarity::util::secp256k1::Secp256k1PublicKey; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; +use stacks_common::{debug, error, info, warn}; +use tiny_http::{Response as HttpResponse, Server as HttpServer}; + +use super::{update_reward_cycle, update_signer_stx_balance}; +use crate::client::{ClientError, StacksClient}; +use crate::config::{GlobalConfig, Network}; +use crate::monitoring::prometheus::gather_metrics_string; +use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; + +#[derive(thiserror::Error, Debug)] +/// Monitoring server errors +pub enum MonitoringError { + /// Already bound to an address + #[error("Already bound to an address")] + AlreadyBound, + /// Server terminated + #[error("Server terminated")] + Terminated, + /// No endpoint configured + #[error("Prometheus endpoint not configured.")] + EndpointNotConfigured, + /// Error fetching metrics from stacks node + #[error("Error fetching data from stacks node: {0}")] + FetchError(#[from] ClientError), +} + +/// Metrics and monitoring server +pub struct MonitoringServer { + http_server: HttpServer, + local_addr: SocketAddr, + stacks_client: StacksClient, + last_metrics_poll: Instant, + network: Network, + public_key: Secp256k1PublicKey, + stacks_node_client: reqwest::blocking::Client, + stacks_node_origin: String, +} + +impl MonitoringServer { + pub fn new( + http_server: HttpServer, + local_addr: SocketAddr, + stacks_client: StacksClient, + network: Network, + public_key: Secp256k1PublicKey, + stacks_node_origin: String, + ) -> Self { + Self { + http_server, + local_addr, + stacks_client, + last_metrics_poll: Instant::now(), + network, + public_key, + stacks_node_client: reqwest::blocking::Client::new(), + stacks_node_origin, + } + } + + /// Start and run the metrics server + pub fn start(config: &GlobalConfig) -> Result<(), MonitoringError> { + let Some(endpoint) = config.metrics_endpoint else { + return Err(MonitoringError::EndpointNotConfigured); + }; + let stacks_client = StacksClient::from(config); + let http_server = HttpServer::http(endpoint).map_err(|_| MonitoringError::AlreadyBound)?; + let public_key = Secp256k1PublicKey::from_private(&config.stacks_private_key); + let mut server = MonitoringServer::new( + http_server, + endpoint, + stacks_client, + config.network.clone(), + public_key, + format!("http://{}", config.node_host), + ); + if let Err(e) = server.update_metrics() { + warn!( + "Monitoring: Error updating metrics when starting server: {:?}", + e + ); + }; + server.main_loop() + } + + // /// Start and run the metrics server + // pub fn run(endpoint: SocketAddr, stacks_client: StacksClient) -> Result<(), MonitoringError> { + // let http_server = HttpServer::http(endpoint).map_err(|_| MonitoringError::AlreadyBound)?; + // let mut server = PrometheusMetrics::new(http_server, endpoint, stacks_client); + // server.main_loop() + // } + + /// Main listener loop of metrics server + pub fn main_loop(&mut self) -> Result<(), MonitoringError> { + info!("{}: Starting Prometheus metrics server", self); + loop { + if let Err(err) = 
self.refresh_metrics() { + error!("Monitoring: Error refreshing metrics: {:?}", err); + } + let request = match self.http_server.recv() { + Ok(request) => request, + Err(err) => { + error!("Monitoring: Error receiving request: {:?}", err); + return Err(MonitoringError::Terminated); + } + }; + + debug!("{}: received request {}", self, request.url()); + + if request.url() == "/metrics" { + let response = HttpResponse::from_string(gather_metrics_string()); + request.respond(response).expect("Failed to send response"); + continue; + } + + if request.url() == "/info" { + request + .respond(HttpResponse::from_string(self.get_info_response())) + .expect("Failed to respond to request"); + continue; + } + + // return 200 OK for "/" + if request.url() == "/" { + request + .respond(HttpResponse::from_string("OK")) + .expect("Failed to respond to request"); + continue; + } + + // Run heartbeat check to test connection to the node + if request.url() == "/heartbeat" { + let (msg, status) = if self.heartbeat() { + ("OK", 200) + } else { + ("Failed", 500) + }; + request + .respond(HttpResponse::from_string(msg).with_status_code(status)) + .expect("Failed to respond to request"); + continue; + } + + // unknown request, return 404 + request + .respond(HttpResponse::from_string("Not Found").with_status_code(404)) + .expect("Failed to respond to request"); + } + } + + /// Check to see if metrics need to be refreshed + fn refresh_metrics(&mut self) -> Result<(), MonitoringError> { + let now = Instant::now(); + if now.duration_since(self.last_metrics_poll).as_secs() > 60 { + self.last_metrics_poll = now; + self.update_metrics()?; + } + Ok(()) + } + + /// Update metrics by making RPC calls to the Stacks node + fn update_metrics(&self) -> Result<(), MonitoringError> { + debug!("{}: Updating metrics", self); + let peer_info = self.stacks_client.get_peer_info()?; + if let Ok(height) = i64::try_from(peer_info.stacks_tip_height) { + update_stacks_tip_height(height); + } else { + warn!( + "Failed to parse stacks tip height: {}", + peer_info.stacks_tip_height + ); + } + let pox_info = self.stacks_client.get_pox_data()?; + if let Ok(reward_cycle) = i64::try_from(pox_info.reward_cycle_id) { + update_reward_cycle(reward_cycle); + } + let signer_stx_addr = self.stacks_client.get_signer_address(); + let account_entry = self.stacks_client.get_account_entry(signer_stx_addr)?; + let balance = i64::from_str_radix(&account_entry.balance[2..], 16).map_err(|e| { + MonitoringError::FetchError(ClientError::MalformedClarityValue(format!( + "Failed to parse balance: {} with err: {}", + &account_entry.balance, e, + ))) + })?; + update_signer_nonce(account_entry.nonce); + update_signer_stx_balance(balance); + Ok(()) + } + + /// Build a JSON response for non-metrics requests + fn get_info_response(&self) -> String { + // let public_key = Secp256k1PublicKey::from_private(&self.stacks_client.publ); + serde_json::to_string(&serde_json::json!({ + "signerPublicKey": to_hex(&self.public_key.to_bytes_compressed()), + "network": self.network.to_string(), + "stxAddress": self.stacks_client.get_signer_address().to_string(), + })) + .expect("Failed to serialize JSON") + } + + /// Poll the Stacks node's `v2/info` endpoint to validate the connection + fn heartbeat(&self) -> bool { + let url = format!("{}/v2/info", self.stacks_node_origin); + let response = self.stacks_node_client.get(url).send(); + match response { + Ok(response) => { + if response.status().is_success() { + true + } else { + warn!( + "Monitoring: Heartbeat failed with status: {}", + 
+                        response.status()
+                    );
+                    false
+                }
+            }
+            Err(err) => {
+                warn!("Monitoring: Heartbeat failed with error: {:?}", err);
+                false
+            }
+        }
+    }
+}
+
+impl std::fmt::Display for MonitoringServer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Signer monitoring server ({})", self.local_addr)
+    }
+}
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 44916500901..cd8bf5972df 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -1,4 +1,5 @@
 use std::collections::VecDeque;
+use std::fmt::Debug;
 // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
 // Copyright (C) 2020-2024 Stacks Open Internet Foundation
 //
@@ -17,19 +18,66 @@ use std::collections::VecDeque;
 use std::sync::mpsc::Sender;
 use std::time::Duration;
 
-use blockstack_lib::burnchains::PoxConstants;
 use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME;
 use blockstack_lib::util_lib::boot::boot_code_id;
+use clarity::codec::StacksMessageCodec;
 use hashbrown::HashMap;
-use libsigner::{SignerEntries, SignerEvent, SignerRunLoop};
+use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop};
 use slog::{slog_debug, slog_error, slog_info, slog_warn};
 use stacks_common::types::chainstate::StacksAddress;
 use stacks_common::{debug, error, info, warn};
+use wsts::common::MerkleRoot;
 use wsts::state_machine::OperationResult;
 
-use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient};
+use crate::chainstate::SortitionsView;
+use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient};
 use crate::config::{GlobalConfig, SignerConfig};
-use crate::signer::{Command as SignerCommand, Signer, SignerSlotID};
+use crate::Signer as SignerTrait;
+
+/// The internal signer state info
+#[derive(PartialEq, Clone, Debug)]
+pub struct StateInfo {
+    /// the runloop state
+    pub runloop_state: State,
+    /// the current reward cycle info
+    pub reward_cycle_info: Option<RewardCycleInfo>,
+}
+
+/// The signer result that can be sent across threads
+pub enum SignerResult {
+    /// The signer has received a status check
+    StatusCheck(StateInfo),
+    /// The signer has completed an operation
+    OperationResult(OperationResult),
+}
+
+impl From<OperationResult> for SignerResult {
+    fn from(result: OperationResult) -> Self {
+        SignerResult::OperationResult(result)
+    }
+}
+
+impl From<StateInfo> for SignerResult {
+    fn from(state_info: StateInfo) -> Self {
+        SignerResult::StatusCheck(state_info)
+    }
+}
+
+/// Which signer operation to perform
+#[derive(PartialEq, Clone, Debug)]
+pub enum SignerCommand {
+    /// Generate a DKG aggregate public key
+    Dkg,
+    /// Sign a message
+    Sign {
+        /// The block to sign over
+        block_proposal: BlockProposal,
+        /// Whether to make a taproot signature
+        is_taproot: bool,
+        /// Taproot merkle root
+        merkle_root: Option<MerkleRoot>,
+    },
+}
 
 /// Which operation to perform
 #[derive(PartialEq, Clone, Debug)]
@@ -56,8 +104,8 @@ pub enum State {
 pub struct RewardCycleInfo {
     /// The current reward cycle
     pub reward_cycle: u64,
-    /// The reward phase cycle length
-    pub reward_phase_block_length: u64,
+    /// The total reward cycle length
+    pub reward_cycle_length: u64,
     /// The prepare phase length
     pub prepare_phase_block_length: u64,
     /// The first burn block height
@@ -70,44 +118,104 @@ impl RewardCycleInfo {
     /// Check if the provided burnchain block height is part of the reward cycle
     pub const fn is_in_reward_cycle(&self, burnchain_block_height: u64) -> bool {
         let blocks_mined =
             burnchain_block_height.saturating_sub(self.first_burnchain_block_height);
-        let reward_cycle_length = self
-            .reward_phase_block_length
-            .saturating_add(self.prepare_phase_block_length);
-        let reward_cycle = blocks_mined / reward_cycle_length;
+        let reward_cycle = blocks_mined / self.reward_cycle_length;
         self.reward_cycle == reward_cycle
     }
 
-    /// Check if the provided burnchain block height is in the prepare phase
-    pub fn is_in_prepare_phase(&self, burnchain_block_height: u64) -> bool {
-        PoxConstants::static_is_in_prepare_phase(
-            self.first_burnchain_block_height,
-            self.reward_phase_block_length,
-            self.prepare_phase_block_length,
-            burnchain_block_height,
-        )
+    /// Get the reward cycle for a specific burnchain block height
+    pub const fn get_reward_cycle(&self, burnchain_block_height: u64) -> u64 {
+        let blocks_mined = burnchain_block_height.saturating_sub(self.first_burnchain_block_height);
+        blocks_mined / self.reward_cycle_length
+    }
+
+    /// Check if the provided burnchain block height is in the prepare phase of the next cycle
+    pub fn is_in_next_prepare_phase(&self, burnchain_block_height: u64) -> bool {
+        let effective_height = burnchain_block_height - self.first_burnchain_block_height;
+        let reward_index = effective_height % self.reward_cycle_length;
+
+        reward_index >= (self.reward_cycle_length - self.prepare_phase_block_length)
+            && self.get_reward_cycle(burnchain_block_height) == self.reward_cycle
+    }
+}
+
+/// The configuration state for a reward cycle.
+/// Allows us to track if we've registered a signer for a cycle or not
+/// and to differentiate between being unregistered and simply not configured
+pub enum ConfiguredSigner<Signer, T>
+where
+    Signer: SignerTrait<T>,
+    T: StacksMessageCodec + Clone + Send + Debug,
+{
+    /// Signer is registered for the cycle and ready to process messages
+    RegisteredSigner(Signer),
+    /// The signer runloop isn't registered for this cycle (i.e., we've checked
+    /// the signer set and we're not in it)
+    NotRegistered {
+        /// the cycle number we're not registered for
+        cycle: u64,
+        /// Phantom data for the message codec
+        _phantom_state: std::marker::PhantomData<T>,
+    },
+}
+
+impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> std::fmt::Display
+    for ConfiguredSigner<Signer, T>
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::RegisteredSigner(s) => write!(f, "{s}"),
+            Self::NotRegistered { cycle, .. } => write!(f, "NotRegistered in Cycle #{cycle}"),
+        }
+    }
+}
+
+impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug>
+    ConfiguredSigner<Signer, T>
+{
+    /// Create a `NotRegistered` instance of the enum (so that callers do not need
+    /// to supply phantom_state data).
+    pub fn not_registered(cycle: u64) -> Self {
+        Self::NotRegistered {
+            cycle,
+            _phantom_state: std::marker::PhantomData,
+        }
+    }
+
+    /// The reward cycle this signer is configured for
+    pub fn reward_cycle(&self) -> u64 {
+        match self {
+            ConfiguredSigner::RegisteredSigner(s) => s.reward_cycle(),
+            ConfiguredSigner::NotRegistered { cycle, .. } => *cycle,
+        }
+    }
+}
 
 /// The runloop for the stacks signer
-pub struct RunLoop {
+pub struct RunLoop<Signer, T>
+where
+    Signer: SignerTrait<T>,
+    T: StacksMessageCodec + Clone + Send + Debug,
+{
     /// Configuration info
     pub config: GlobalConfig,
     /// The stacks node client
     pub stacks_client: StacksClient,
     /// The internal signer for an odd or even reward cycle
     /// Keyed by reward cycle % 2
-    pub stacks_signers: HashMap<u64, Signer>,
+    pub stacks_signers: HashMap<u64, ConfiguredSigner<Signer, T>>,
     /// The state of the runloop
     pub state: State,
     /// The commands received thus far
     pub commands: VecDeque<RunLoopCommand>,
     /// The current reward cycle info. Only None if the runloop is uninitialized
     pub current_reward_cycle_info: Option<RewardCycleInfo>,
+    /// Cached sortition data from `stacks-node`
+    pub sortition_state: Option<SortitionsView>,
 }
 
-impl From<GlobalConfig> for RunLoop {
-    /// Creates new runloop from a config
-    fn from(config: GlobalConfig) -> Self {
+impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop<Signer, T> {
+    /// Create a new signer runloop from the provided configuration
+    pub fn new(config: GlobalConfig) -> Self {
         let stacks_client = StacksClient::from(&config);
         Self {
             config,
@@ -116,11 +224,9 @@ impl From<GlobalConfig> for RunLoop {
             state: State::Uninitialized,
             commands: VecDeque::new(),
             current_reward_cycle_info: None,
+            sortition_state: None,
         }
     }
-}
-
-impl RunLoop {
     /// Get the registered signers for a specific reward cycle
     /// Returns None if no signers are registered or it's not a Nakamoto cycle
     pub fn get_parsed_reward_set(
@@ -128,10 +234,7 @@ impl RunLoop {
         reward_cycle: u64,
     ) -> Result<Option<SignerEntries>, ClientError> {
         debug!("Getting registered signers for reward cycle {reward_cycle}...");
-        let Some(signers) = self
-            .stacks_client
-            .get_reward_set_signers_with_retry(reward_cycle)?
-        else {
+        let Some(signers) = self.stacks_client.get_reward_set_signers(reward_cycle)? else {
             warn!("No reward set signers found for reward cycle {reward_cycle}.");
             return Ok(None);
         };
@@ -168,25 +271,40 @@ impl RunLoop {
         Ok(signer_slot_ids)
     }
     /// Get a signer configuration for a specific reward cycle from the stacks node
-    fn get_signer_config(&mut self, reward_cycle: u64) -> Option<SignerConfig> {
+    fn get_signer_config(
+        &mut self,
+        reward_cycle: u64,
+    ) -> Result<Option<SignerConfig>, ClientError> {
         // We can only register for a reward cycle if a reward set exists.
-        let signer_entries = self.get_parsed_reward_set(reward_cycle).ok()??;
-        let signer_slot_ids = self
-            .get_parsed_signer_slots(&self.stacks_client, reward_cycle)
-            .ok()?;
+        let signer_entries = match self.get_parsed_reward_set(reward_cycle) {
+            Ok(Some(x)) => x,
+            Ok(None) => return Ok(None),
+            Err(e) => {
+                warn!("Error while fetching reward set {reward_cycle}: {e:?}");
+                return Err(e);
+            }
+        };
+        let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle)
+        {
+            Ok(x) => x,
+            Err(e) => {
+                warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}");
+                return Err(e);
+            }
+        };
         let current_addr = self.stacks_client.get_signer_address();
         let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else {
             warn!(
                 "Signer {current_addr} was not found in stacker db. Must not be registered for this reward cycle {reward_cycle}."
             );
-            return None;
+            return Ok(None);
         };
         let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else {
             warn!(
                 "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}."
             );
-            return None;
+            return Ok(None);
         };
         info!(
             "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}."
@@ -196,13 +314,14 @@ impl RunLoop { .get(signer_id) .cloned() .unwrap_or_default(); - Some(SignerConfig { + Ok(Some(SignerConfig { reward_cycle, signer_id: *signer_id, signer_slot_id: *signer_slot_id, key_ids, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), + first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, ecdsa_private_key: self.config.ecdsa_private_key, stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), @@ -213,39 +332,33 @@ impl RunLoop { nonce_timeout: self.config.nonce_timeout, sign_timeout: self.config.sign_timeout, tx_fee_ustx: self.config.tx_fee_ustx, + max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), - }) + block_proposal_timeout: self.config.block_proposal_timeout, + })) } /// Refresh signer configuration for a specific reward cycle fn refresh_signer_config(&mut self, reward_cycle: u64) { let reward_index = reward_cycle % 2; - if let Some(new_signer_config) = self.get_signer_config(reward_cycle) { - let signer_id = new_signer_config.signer_id; - debug!("Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initializing signer state."); - if reward_cycle != 0 { - let prior_reward_cycle = reward_cycle.saturating_sub(1); - let prior_reward_set = prior_reward_cycle % 2; - if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) { - if signer.reward_cycle == prior_reward_cycle { - // The signers have been calculated for the next reward cycle. Update the current one - debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); - signer.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - signer.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } - } + let new_signer_config = match self.get_signer_config(reward_cycle) { + Ok(Some(new_signer_config)) => { + let signer_id = new_signer_config.signer_id; + let new_signer = Signer::new(new_signer_config); + info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initialized signer state."); + ConfiguredSigner::RegisteredSigner(new_signer) } - let new_signer = Signer::from(new_signer_config); - info!("{new_signer} initialized."); - self.stacks_signers.insert(reward_index, new_signer); - } else { - warn!("Signer is not registered for reward cycle {reward_cycle}. Waiting for confirmed registration..."); - } + Ok(None) => { + warn!("Signer is not registered for reward cycle {reward_cycle}"); + ConfiguredSigner::not_registered(reward_cycle) + } + Err(e) => { + warn!("Failed to get the reward set info: {e}. 
Will try again later."); + return; + } + }; + + self.stacks_signers.insert(reward_index, new_signer_config); } fn initialize_runloop(&mut self) -> Result<(), ClientError> { @@ -258,7 +371,8 @@ impl RunLoop { let current_reward_cycle = reward_cycle_info.reward_cycle; self.refresh_signer_config(current_reward_cycle); // We should only attempt to initialize the next reward cycle signer if we are in the prepare phase of the next reward cycle - if reward_cycle_info.is_in_prepare_phase(reward_cycle_info.last_burnchain_block_height) { + if reward_cycle_info.is_in_next_prepare_phase(reward_cycle_info.last_burnchain_block_height) + { self.refresh_signer_config(current_reward_cycle.saturating_add(1)); } self.current_reward_cycle_info = Some(reward_cycle_info); @@ -270,34 +384,58 @@ impl RunLoop { Ok(()) } - fn refresh_runloop(&mut self, current_burn_block_height: u64) -> Result<(), ClientError> { + fn refresh_runloop(&mut self, ev_burn_block_height: u64) -> Result<(), ClientError> { + let current_burn_block_height = std::cmp::max( + self.stacks_client.get_peer_info()?.burn_block_height, + ev_burn_block_height, + ); let reward_cycle_info = self .current_reward_cycle_info .as_mut() .expect("FATAL: cannot be an initialized signer with no reward cycle info."); + let current_reward_cycle = reward_cycle_info.reward_cycle; + let block_reward_cycle = reward_cycle_info.get_reward_cycle(current_burn_block_height); + // First ensure we refresh our view of the current reward cycle information - if !reward_cycle_info.is_in_reward_cycle(current_burn_block_height) { - let new_reward_cycle_info = retry_with_exponential_backoff(|| { - self.stacks_client - .get_current_reward_cycle_info() - .map_err(backoff::Error::transient) - })?; + if block_reward_cycle != current_reward_cycle { + let new_reward_cycle_info = RewardCycleInfo { + reward_cycle: block_reward_cycle, + reward_cycle_length: reward_cycle_info.reward_cycle_length, + prepare_phase_block_length: reward_cycle_info.prepare_phase_block_length, + first_burnchain_block_height: reward_cycle_info.first_burnchain_block_height, + last_burnchain_block_height: current_burn_block_height, + }; *reward_cycle_info = new_reward_cycle_info; } + let reward_cycle_before_refresh = current_reward_cycle; let current_reward_cycle = reward_cycle_info.reward_cycle; - // We should only attempt to refresh the signer if we are not configured for the next reward cycle yet and we received a new burn block for its prepare phase - if reward_cycle_info.is_in_prepare_phase(current_burn_block_height) { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if self - .stacks_signers - .get(&(next_reward_cycle % 2)) - .map(|signer| signer.reward_cycle != next_reward_cycle) - .unwrap_or(true) - { - info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). 
Checking for signer registration...");
+        let is_in_next_prepare_phase =
+            reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height);
+        let next_reward_cycle = current_reward_cycle.saturating_add(1);
+
+        info!(
+            "Refreshing runloop with new burn block event";
+            "latest_node_burn_ht" => current_burn_block_height,
+            "event_ht" => ev_burn_block_height,
+            "reward_cycle_before_refresh" => reward_cycle_before_refresh,
+            "current_reward_cycle" => current_reward_cycle,
+            "configured_for_current" => Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle),
+            "configured_for_next" => Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle),
+            "is_in_next_prepare_phase" => is_in_next_prepare_phase,
+        );
+
+        // Check if we need to refresh the signers:
+        //  need to refresh the current signer if we are not configured for the current reward cycle
+        //  need to refresh the next signer if we're not configured for the next reward cycle, and we're in the prepare phase
+        if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) {
+            self.refresh_signer_config(current_reward_cycle);
+        }
+        if is_in_next_prepare_phase {
+            if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) {
+                self.refresh_signer_config(next_reward_cycle);
+            }
+        }
+
         self.cleanup_stale_signers(current_reward_cycle);
         if self.stacks_signers.is_empty() {
             self.state = State::NoRegisteredSigners;
@@ -307,13 +445,35 @@ impl RunLoop {
         Ok(())
     }
 
+    fn is_configured_for_cycle(
+        stacks_signers: &HashMap<u64, ConfiguredSigner<Signer, T>>,
+        reward_cycle: u64,
+    ) -> bool {
+        let Some(signer) = stacks_signers.get(&(reward_cycle % 2)) else {
+            return false;
+        };
+        signer.reward_cycle() == reward_cycle
+    }
+
     fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) {
         let mut to_delete = Vec::new();
         for (idx, signer) in &mut self.stacks_signers {
-            if signer.reward_cycle < current_reward_cycle {
+            let reward_cycle = signer.reward_cycle();
+            let next_reward_cycle = reward_cycle.wrapping_add(1);
+            let stale = match next_reward_cycle.cmp(&current_reward_cycle) {
+                std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale
+                std::cmp::Ordering::Equal => {
+                    // We are the next reward cycle, so check if we were registered and have any pending blocks to process
                    match signer {
+                        ConfiguredSigner::RegisteredSigner(signer) => !signer.has_pending_blocks(),
+                        _ => true,
+                    }
+                }
+                std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale
+            };
+            if stale {
                 debug!("{signer}: Signer's tenure has completed.");
                 to_delete.push(*idx);
-                continue;
             }
         }
         for idx in to_delete {
@@ -322,7 +482,9 @@ impl RunLoop {
     }
 }
 
-impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
+impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug>
+    SignerRunLoop<Vec<SignerResult>, RunLoopCommand, T> for RunLoop<Signer, T>
+{
     fn set_event_timeout(&mut self, timeout: Duration) {
         self.config.event_timeout = timeout;
     }
@@ -333,14 +495,27 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
     fn run_one_pass(
         &mut self,
-        event: Option<SignerEvent>,
+        event: Option<SignerEvent<T>>,
         cmd: Option<RunLoopCommand>,
-        res: Sender<Vec<OperationResult>>,
-    ) -> Option<Vec<OperationResult>> {
+        res: Sender<Vec<SignerResult>>,
+    ) -> Option<Vec<SignerResult>> {
         debug!(
            "Running one pass for the signer. 
state={:?}, cmd={cmd:?}, event={event:?}", self.state ); + // This is the only event that we respond to from the outer signer runloop + if let Some(SignerEvent::StatusCheck) = event { + info!("Signer status check requested: {:?}.", self.state); + if let Err(e) = res.send(vec![StateInfo { + runloop_state: self.state, + reward_cycle_info: self.current_reward_cycle_info, + } + .into()]) + { + error!("Failed to send status check result: {e}."); + } + } + if let Some(cmd) = cmd { self.commands.push_back(cmd); } @@ -352,8 +527,8 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { } return None; } - } else if let Some(SignerEvent::NewBurnBlock(current_burn_block_height)) = event { - if let Err(e) = self.refresh_runloop(current_burn_block_height) { + } else if let Some(SignerEvent::NewBurnBlock { burn_height, .. }) = event { + if let Err(e) = self.refresh_runloop(burn_height) { error!("Failed to refresh signer runloop: {e}."); warn!("Signer may have an outdated view of the network."); } @@ -363,80 +538,43 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { .as_ref() .expect("FATAL: cannot be an initialized signer with no reward cycle info.") .reward_cycle; - if self.state == State::NoRegisteredSigners { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if let Some(event) = event { - info!("Signer is not registered for the current reward cycle ({current_reward_cycle}) or next reward cycle ({next_reward_cycle}). Waiting for confirmed registration..."); - warn!("Ignoring event: {event:?}"); - } - return None; - } - for signer in self.stacks_signers.values_mut() { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogenous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock(_)) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => { - Some(u64::from(msg_parity) % 2) - } - }; - let other_signer_parity = (signer.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { + for configured_signer in self.stacks_signers.values_mut() { + let ConfiguredSigner::RegisteredSigner(ref mut signer) = configured_signer else { + debug!("{configured_signer}: Not configured for cycle, ignoring events for cycle"); continue; - } + }; - if signer.approved_aggregate_public_key.is_none() { - if let Err(e) = retry_with_exponential_backoff(|| { - signer - .update_dkg(&self.stacks_client, current_reward_cycle) - .map_err(backoff::Error::transient) - }) { - error!("{signer}: failed to update DKG: {e}"); - } - } - signer.refresh_coordinator(); - if let Err(e) = signer.process_event( + signer.process_event( &self.stacks_client, + &mut self.sortition_state, event.as_ref(), res.clone(), current_reward_cycle, - ) { - error!("{signer}: errored processing event: {e}"); - } - if let Some(command) = self.commands.pop_front() { - let reward_cycle = command.reward_cycle; - if signer.reward_cycle != reward_cycle { - warn!( - "{signer}: not registered for reward cycle {reward_cycle}. 
Ignoring command: {command:?}" - ); - } else { - info!( - "{signer}: Queuing an external runloop command ({:?}): {command:?}", - signer - .state_machine - .public_keys - .signers - .get(&signer.signer_id) - ); - signer.commands.push_back(command.command); - } - } + ); // After processing event, run the next command for each signer - signer.process_next_command(&self.stacks_client, current_reward_cycle); + signer.process_command( + &self.stacks_client, + current_reward_cycle, + self.commands.pop_front(), + ); + } + if self.state == State::NoRegisteredSigners && event.is_some() { + let next_reward_cycle = current_reward_cycle.saturating_add(1); + info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); } None } } + #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; use libsigner::SignerEntries; + use rand::{thread_rng, Rng, RngCore}; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; + use super::RewardCycleInfo; + #[test] fn parse_nakamoto_signer_entries_test() { let nmb_signers = 10; @@ -462,4 +600,119 @@ mod tests { (0..nmb_signers).map(|id| id as u32).collect::>() ); } + + #[test] + fn is_in_reward_cycle_info() { + let rand_byte: u8 = std::cmp::max(1, thread_rng().gen()); + let prepare_phase_block_length = rand_byte as u64; + // Ensure the reward cycle is not close to u64 Max to prevent overflow when adding prepare phase len + let reward_cycle_length = (std::cmp::max( + prepare_phase_block_length.wrapping_add(1), + thread_rng().next_u32() as u64, + )) + .wrapping_add(prepare_phase_block_length); + let reward_cycle_phase_block_length = + reward_cycle_length.wrapping_sub(prepare_phase_block_length); + let first_burnchain_block_height = std::cmp::max(1u8, thread_rng().gen()) as u64; + let last_burnchain_block_height = thread_rng().gen_range( + first_burnchain_block_height + ..first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(prepare_phase_block_length), + ); + let blocks_mined = last_burnchain_block_height.wrapping_sub(first_burnchain_block_height); + let reward_cycle = blocks_mined / reward_cycle_length; + + let reward_cycle_info = RewardCycleInfo { + reward_cycle, + reward_cycle_length, + prepare_phase_block_length, + first_burnchain_block_height, + last_burnchain_block_height, + }; + assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height)); + assert!(reward_cycle_info.is_in_reward_cycle(last_burnchain_block_height)); + assert!(!reward_cycle_info + .is_in_reward_cycle(first_burnchain_block_height.wrapping_add(reward_cycle_length))); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(1) + )); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) + )); + assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height.wrapping_add(1))); + + assert!(reward_cycle_info.is_in_reward_cycle( + first_burnchain_block_height + .wrapping_add(reward_cycle_phase_block_length) + .wrapping_add(1) + )); + } + + #[test] + fn is_in_next_prepare_phase() { + let reward_cycle_info = RewardCycleInfo { + reward_cycle: 5, + reward_cycle_length: 10, + prepare_phase_block_length: 5, + first_burnchain_block_height: 0, + last_burnchain_block_height: 50, + }; + + 
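+        // With `reward_cycle_length` 10 and `prepare_phase_block_length` 5, +        // the cycle containing burn height 50 spans heights 50..=59, so the +        // next cycle's prepare phase covers heights 55..=59; the assertions +        // below walk both edges of that window.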
assert!(!reward_cycle_info.is_in_next_prepare_phase(49)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(50)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(51)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(52)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(53)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(54)); + assert!(reward_cycle_info.is_in_next_prepare_phase(55)); + assert!(reward_cycle_info.is_in_next_prepare_phase(56)); + assert!(reward_cycle_info.is_in_next_prepare_phase(57)); + assert!(reward_cycle_info.is_in_next_prepare_phase(58)); + assert!(reward_cycle_info.is_in_next_prepare_phase(59)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(60)); + assert!(!reward_cycle_info.is_in_next_prepare_phase(61)); + + let rand_byte: u8 = std::cmp::max(1, thread_rng().gen()); + let prepare_phase_block_length = rand_byte as u64; + // Ensure the reward cycle is not close to u64 Max to prevent overflow when adding prepare phase len + let reward_cycle_length = (std::cmp::max( + prepare_phase_block_length.wrapping_add(1), + thread_rng().next_u32() as u64, + )) + .wrapping_add(prepare_phase_block_length); + let reward_cycle_phase_block_length = + reward_cycle_length.wrapping_sub(prepare_phase_block_length); + let first_burnchain_block_height = std::cmp::max(1u8, thread_rng().gen()) as u64; + let last_burnchain_block_height = thread_rng().gen_range( + first_burnchain_block_height + ..first_burnchain_block_height + .wrapping_add(reward_cycle_length) + .wrapping_sub(prepare_phase_block_length), + ); + let blocks_mined = last_burnchain_block_height.wrapping_sub(first_burnchain_block_height); + let reward_cycle = blocks_mined / reward_cycle_length; + + let reward_cycle_info = RewardCycleInfo { + reward_cycle, + reward_cycle_length, + prepare_phase_block_length, + first_burnchain_block_height, + last_burnchain_block_height, + }; + + for i in 0..reward_cycle_length { + if i < reward_cycle_phase_block_length { + assert!(!reward_cycle_info + .is_in_next_prepare_phase(first_burnchain_block_height.wrapping_add(i))); + } else { + assert!(reward_cycle_info + .is_in_next_prepare_phase(first_burnchain_block_height.wrapping_add(i))); + } + } + } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index ea9c4eeb17f..7dcd087ec5c 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -15,17 +15,135 @@ // along with this program. If not, see . 
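+// Schema summary for this change (see the statics below): the new `db_config` +// table records a schema `version` so later binaries can migrate; `blocks` +// gains `consensus_hash`, `signed_over`, `stacks_height`, and +// `burn_block_height` columns plus indexes for tenure-scoped lookups; +// `burn_blocks` records when each burn block event was received; and signer +// state is now persisted as an opaque encrypted BLOB.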
use std::path::Path; +use std::time::SystemTime; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::util_lib::db::{ - query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, + query_row, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; -use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; -use slog::slog_debug; -use stacks_common::debug; +use clarity::types::chainstate::BurnchainHeaderHash; +use clarity::util::get_epoch_time_secs; +use libsigner::BlockProposal; +use rusqlite::{ + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, +}; +use serde::{Deserialize, Serialize}; +use slog::{slog_debug, slog_error}; +use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::traits::SignerState; +use stacks_common::{debug, error}; +use wsts::net::NonceRequest; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +/// Information specific to Signer V1 +pub struct BlockInfoV1 { + /// The associated packet nonce request if we have one + pub nonce_request: Option<NonceRequest>, +} + +impl From<NonceRequest> for BlockInfoV1 { + fn from(value: NonceRequest) -> Self { + Self { + nonce_request: Some(value), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +/// Store extra version-specific info in `BlockInfo` +pub enum ExtraBlockInfo { + #[default] + /// Don't know what version + None, + /// Extra data for Signer V0 + V0, + /// Extra data for Signer V1 + V1(BlockInfoV1), +} + +impl ExtraBlockInfo { + /// Take `nonce_request` if it exists + pub fn take_nonce_request(&mut self) -> Option<NonceRequest> { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, + ExtraBlockInfo::V1(v1) => v1.nonce_request.take(), + } + } + /// Set `nonce_request` if it exists + pub fn set_nonce_request(&mut self, value: NonceRequest) -> Result<(), &str> { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), + ExtraBlockInfo::V1(v1) => { + v1.nonce_request = Some(value); + Ok(()) + } + } + } +} + +/// Additional info about a proposed block +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct BlockInfo { + /// The block we are considering + pub block: NakamotoBlock, + /// The burn block height at which the block was proposed + pub burn_block_height: u64, + /// The reward cycle the block belongs to + pub reward_cycle: u64, + /// Our vote on the block if we have one yet + pub vote: Option<NakamotoBlockVote>, + /// Whether the block contents are valid + pub valid: Option<bool>, + /// Whether this block is already being signed over + pub signed_over: bool, + /// Time at which the proposal was received by this signer (epoch time in seconds) + pub proposed_time: u64, + /// Time at which the proposal was signed by this signer (epoch time in seconds) + pub signed_self: Option<u64>, + /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) + pub signed_group: Option<u64>, + /// Extra data specific to v0, v1, etc.
+ pub ext: ExtraBlockInfo, +} -use crate::signer::BlockInfo; +impl From<BlockProposal> for BlockInfo { + fn from(value: BlockProposal) -> Self { + Self { + block: value.block, + burn_block_height: value.burn_height, + reward_cycle: value.reward_cycle, + vote: None, + valid: None, + signed_over: false, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, + ext: ExtraBlockInfo::default(), + } + } +} +impl BlockInfo { + /// Create a new BlockInfo with an associated nonce request packet + pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { + let mut block_info = BlockInfo::from(block_proposal); + block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); + block_info.signed_over = true; + block_info + } + + /// Mark this block as valid, signed over, and record a timestamp in the block info if it wasn't + /// already set. + pub fn mark_signed_and_valid(&mut self) { + self.valid = Some(true); + self.signed_over = true; + self.signed_self.get_or_insert(get_epoch_time_secs()); + } + + /// Return the block's signer signature hash + pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { + self.block.header.signer_signature_hash() + } +} /// This struct manages a SQLite database connection /// for the signer. @@ -35,43 +153,123 @@ pub struct SignerDb { db: Connection, } -const CREATE_BLOCKS_TABLE: &'static str = " +static CREATE_BLOCKS_TABLE: &str = " CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) -)"; +) STRICT"; + +static CREATE_INDEXES: &str = " +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); +CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid'))); +CREATE INDEX IF NOT EXISTS burn_blocks_height ON burn_blocks (block_height); +"; -const CREATE_SIGNER_STATE_TABLE: &'static str = " +static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, - state TEXT NOT NULL -)"; + encrypted_state BLOB NOT NULL +) STRICT"; + +static CREATE_BURN_STATE_TABLE: &str = " +CREATE TABLE IF NOT EXISTS burn_blocks ( + block_hash TEXT PRIMARY KEY, + block_height INTEGER NOT NULL, + received_time INTEGER NOT NULL +) STRICT"; + +static CREATE_DB_CONFIG: &str = " + CREATE TABLE db_config( + version INTEGER NOT NULL + ) STRICT +"; + +static DROP_SCHEMA_0: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; + +static SCHEMA_1: &[&str] = &[ + DROP_SCHEMA_0, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE, + CREATE_SIGNER_STATE_TABLE, + CREATE_INDEXES, + "INSERT INTO db_config (version) VALUES (1);", +];
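+// Migration batches are cumulative: `create_or_migrate` (below) reads +// `db_config.version` and applies each `SCHEMA_N` batch in order until it +// reaches `SCHEMA_VERSION`. A hypothetical future batch would follow the same +// shape (a sketch only -- the added column and version are invented for +// illustration): +// +// static SCHEMA_2: &[&str] = &[ +//     "ALTER TABLE blocks ADD COLUMN broadcasted INTEGER;", +//     "UPDATE db_config SET version = 2;", +// ];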
 impl SignerDb { + /// The current schema version used in this build of the signer binary. + pub const SCHEMA_VERSION: u32 = 1; + /// Create a new `SignerDb` instance. /// This will create a new SQLite database at the given path /// or an in-memory database if the path is ":memory:" pub fn new(db_path: impl AsRef<Path>) -> Result<Self, DBError> { let connection = Self::connect(db_path)?; - let signer_db = Self { db: connection }; - - signer_db.instantiate_db()?; + let mut signer_db = Self { db: connection }; + signer_db.create_or_migrate()?; Ok(signer_db) } - fn instantiate_db(&self) -> Result<(), DBError> { - if !table_exists(&self.db, "blocks")? { - self.db.execute(CREATE_BLOCKS_TABLE, NO_PARAMS)?; + /// Returns the schema version of the database + fn get_schema_version(conn: &Connection) -> Result<u32, DBError> { + if !table_exists(conn, "db_config")? { + return Ok(0); } + let result = conn + .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + row.get(0) + }) + .optional(); + match result { + Ok(x) => Ok(x.unwrap_or(0)), + Err(e) => Err(DBError::from(e)), + } + } - if !table_exists(&self.db, "signer_states")? { - self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; + /// Migrate from schema 0 to schema 1 + fn schema_1_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 1 { + // no migration necessary + return Ok(()); } + for statement in SCHEMA_1.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + + /// Either instantiate a new database, or migrate an existing one. + /// If the detected version of the existing database is 0 (i.e., a + /// pre-migration-logic DB), the DB will be dropped. + fn create_or_migrate(&mut self) -> Result<(), DBError> { + let sql_tx = tx_begin_immediate(&mut self.db)?; + loop { + let version = Self::get_schema_version(&sql_tx)?; + match version { + 0 => Self::schema_1_migration(&sql_tx)?, + 1 => break, + x => return Err(DBError::Other(format!( + "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", + Self::SCHEMA_VERSION, + ))), + } + } + sql_tx.commit()?; Ok(()) } @@ -84,26 +282,26 @@ impl SignerDb { } /// Get the signer state for the provided reward cycle if it exists in the database - pub fn get_signer_state(&self, reward_cycle: u64) -> Result<Option<SignerState>, DBError> { - let result: Option<String> = query_row( + pub fn get_encrypted_signer_state( + &self, + reward_cycle: u64, + ) -> Result<Option<Vec<u8>>, DBError> { + query_row( &self.db, - "SELECT state FROM signer_states WHERE reward_cycle = ?", - &[u64_to_sql(reward_cycle)?], - )?; - - try_deserialize(result) + "SELECT encrypted_state FROM signer_states WHERE reward_cycle = ?", + [u64_to_sql(reward_cycle)?], + ) } /// Insert the given state in the `signer_states` table for the given reward cycle - pub fn insert_signer_state( + pub fn insert_encrypted_signer_state( &self, reward_cycle: u64, - signer_state: &SignerState, + encrypted_signer_state: &[u8], ) -> Result<(), DBError> { - let serialized_state = serde_json::to_string(signer_state)?; self.db.execute( - "INSERT OR REPLACE INTO signer_states (reward_cycle, state) VALUES (?1, ?2)", - params![&u64_to_sql(reward_cycle)?, &serialized_state], + "INSERT OR REPLACE INTO signer_states (reward_cycle, encrypted_state) VALUES (?1, ?2)", + params![u64_to_sql(reward_cycle)?, encrypted_signer_state], )?; Ok(()) } @@ -118,42 +316,117 @@ impl SignerDb { let result: Option<String> = query_row( &self.db, "SELECT block_info FROM blocks WHERE reward_cycle = ? AND signer_signature_hash = ?", - params![&u64_to_sql(reward_cycle)?, hash.to_string()], + params![u64_to_sql(reward_cycle)?, hash.to_string()], )?; try_deserialize(result) } - /// Insert a block into the database.
- /// `hash` is the `signer_signature_hash` of the block. - pub fn insert_block( + /// Return the last signed block in a tenure (identified by its consensus hash) + pub fn get_last_signed_block_in_tenure( + &self, + tenure: &ConsensusHash, + ) -> Result<Option<BlockInfo>, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height DESC LIMIT 1"; + let result: Option<String> = query_row(&self.db, query, [tenure])?; + + try_deserialize(result) + } + + /// Return the first signed block in a tenure (identified by its consensus hash) + pub fn get_first_signed_block_in_tenure( + &self, + tenure: &ConsensusHash, + ) -> Result<Option<BlockInfo>, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height ASC LIMIT 1"; + let result: Option<String> = query_row(&self.db, query, [tenure])?; + + try_deserialize(result) + } + + /// Insert or replace a burn block into the database + pub fn insert_burn_block( &mut self, - reward_cycle: u64, - block_info: &BlockInfo, + burn_hash: &BurnchainHeaderHash, + burn_height: u64, + received_time: &SystemTime, ) -> Result<(), DBError> { + let received_ts = received_time + .duration_since(std::time::UNIX_EPOCH) + .map_err(|e| DBError::Other(format!("Bad system time: {e}")))? + .as_secs(); + debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts); + self.db.execute( + "INSERT OR REPLACE INTO burn_blocks (block_hash, block_height, received_time) VALUES (?1, ?2, ?3)", + params![ + burn_hash, + u64_to_sql(burn_height)?, + u64_to_sql(received_ts)?, + ], + )?; + Ok(()) + } + + /// Get the timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer, + /// if that burn block has been received. + pub fn get_burn_block_receive_time( + &self, + burn_hash: &BurnchainHeaderHash, + ) -> Result<Option<u64>, DBError> { + let query = "SELECT received_time FROM burn_blocks WHERE block_hash = ? LIMIT 1"; + let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, &[burn_hash])? else { + return Ok(None); + }; + let receive_time = u64::try_from(receive_time_i64).map_err(|e| { + error!("Failed to parse db received_time as u64: {e}"); + DBError::Corruption + })?; + Ok(Some(receive_time)) + } + + /// Insert or replace a block into the database. + /// `hash` is the `signer_signature_hash` of the block.
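+    /// A minimal usage sketch (illustrative values; error handling elided): +    /// ```ignore +    /// let mut db = SignerDb::new(":memory:")?; +    /// let mut info = BlockInfo::from(block_proposal); +    /// info.mark_signed_and_valid(); +    /// db.insert_block(&info)?; +    /// ```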
+ pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { let block_json = serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; - debug!( - "Inserting block_info: reward_cycle = {reward_cycle}, sighash = {hash}, block_id = {block_id}, signed = {signed_over} vote = {:?}", - block_info.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }) + let vote = block_info + .vote + .as_ref() + .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); + + debug!("Inserting block_info."; + "reward_cycle" => %block_info.reward_cycle, + "burn_block_height" => %block_info.burn_block_height, + "sighash" => %hash, + "block_id" => %block_id, + "signed" => %signed_over, + "vote" => vote ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, signer_signature_hash, block_info) VALUES (?1, ?2, ?3)", - params![&u64_to_sql(reward_cycle)?, hash.to_string(), &block_json], + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + params![ + u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, + signed_over, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + ], )?; Ok(()) } + + /// Determine if there are any pending blocks that have not yet been processed by checking the block_info.valid field + pub fn has_pending_blocks(&self, reward_cycle: u64) -> Result<bool, DBError> { + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ? 
AND json_extract(block_info, '$.valid') IS NULL LIMIT 1"; + let result: Option = + query_row(&self.db, query, params!(&u64_to_sql(reward_cycle)?))?; + + Ok(result.is_some()) + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -170,8 +443,8 @@ where pub fn test_signer_db(db_path: &str) -> SignerDb { use std::fs; - if fs::metadata(&db_path).is_ok() { - fs::remove_file(&db_path).unwrap(); + if fs::metadata(db_path).is_ok() { + fs::remove_file(db_path).unwrap(); } SignerDb::new(db_path).expect("Failed to create signer db") } @@ -184,16 +457,8 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, }; - use blockstack_lib::chainstate::stacks::ThresholdSignature; - use num_traits::identities::Zero; - use polynomial::Polynomial; - use stacks_common::bitvec::BitVec; - use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; - use stacks_common::util::secp256k1::MessageSignature; - use wsts::common::Nonce; - use wsts::curve::point::Point; - use wsts::curve::scalar::Scalar; - use wsts::traits::PartyState; + use clarity::util::secp256k1::MessageSignature; + use libsigner::BlockProposal; use super::*; @@ -204,53 +469,23 @@ mod tests { } fn create_block_override( - overrides: impl FnOnce(&mut NakamotoBlock), - ) -> (BlockInfo, NakamotoBlock) { - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - let mut block = NakamotoBlock { + overrides: impl FnOnce(&mut BlockProposal), + ) -> (BlockInfo, BlockProposal) { + let header = NakamotoBlockHeader::empty(); + let block = NakamotoBlock { header, txs: vec![], }; - overrides(&mut block); - (BlockInfo::new(block.clone()), block) - } - - fn create_signer_state(id: u32) -> SignerState { - let ps1 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), - }; - - let ps2 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), + let mut block_proposal = BlockProposal { + block, + burn_height: 7, + reward_cycle: 42, }; - - SignerState { - id, - key_ids: vec![2, 4], - num_keys: 12, - num_parties: 10, - threshold: 7, - group_key: Point::from(Scalar::from(42)), - parties: vec![(2, ps1), (4, ps2)], - } + overrides(&mut block_proposal); + (BlockInfo::from(block_proposal.clone()), block_proposal) } - fn create_block() -> (BlockInfo, NakamotoBlock) { + fn create_block() -> (BlockInfo, BlockProposal) { create_block_override(|_| {}) } @@ -263,21 +498,26 @@ mod tests { fn test_basic_signer_db_with_path(db_path: impl AsRef) { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 1; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) + let (block_info, block_proposal) = create_block(); + let reward_cycle = block_info.reward_cycle; + db.insert_block(&block_info) .expect("Unable to insert block into db"); - let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + 
&block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::new(block.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); // Test looking up a block from a different reward cycle let block_info = db - .block_lookup(reward_cycle + 1, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle + 1, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap(); assert!(block_info.is_none()); } @@ -297,23 +537,27 @@ mod tests { fn test_update_block() { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 42; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) + let (block_info, block_proposal) = create_block(); + let reward_cycle = block_info.reward_cycle; + db.insert_block(&block_info) .expect("Unable to insert block into db"); let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::new(block.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); let old_block_info = block_info; - let old_block = block; + let old_block_proposal = block_proposal; - let (mut block_info, block) = create_block_override(|b| { - b.header.signer_signature = old_block.header.signer_signature.clone(); + let (mut block_info, block_proposal) = create_block_override(|b| { + b.block.header.signer_signature = + old_block_proposal.block.header.signer_signature.clone(); }); assert_eq!( block_info.signer_signature_hash(), @@ -324,11 +568,14 @@ mod tests { rejected: false, }; block_info.vote = Some(vote.clone()); - db.insert_block(reward_cycle, &block_info) + db.insert_block(&block_info) .expect("Unable to insert block into db"); let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) + .block_lookup( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) .unwrap() .expect("Unable to get block from db"); @@ -336,40 +583,124 @@ mod tests { assert_eq!(block_info.vote, Some(vote)); } + #[test] + fn get_first_signed_block() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let (mut block_info, block_proposal) = create_block(); + db.insert_block(&block_info).unwrap(); + + assert!(db + .get_first_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + .unwrap() + .is_none()); + + block_info.mark_signed_and_valid(); + db.insert_block(&block_info).unwrap(); + + let fetched_info = db + .get_first_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + .unwrap() + .unwrap(); + assert_eq!(fetched_info, block_info); + } + + #[test] + fn insert_burn_block_get_time() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let test_burn_hash = BurnchainHeaderHash([10; 32]); + let stime = SystemTime::now(); + let time_to_epoch = stime + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + db.insert_burn_block(&test_burn_hash, 10, &stime).unwrap(); + + let stored_time = db + .get_burn_block_receive_time(&test_burn_hash) + .unwrap() + .unwrap(); + assert_eq!(stored_time, time_to_epoch); + } + #[test] fn test_write_signer_state() { let db_path = tmp_db_path(); let db = 
SignerDb::new(db_path).expect("Failed to create signer db"); - let state_0 = create_signer_state(0); - let state_1 = create_signer_state(1); + let state_0 = vec![0]; + let state_1 = vec![1; 1024]; - db.insert_signer_state(10, &state_0) + db.insert_encrypted_signer_state(10, &state_0) .expect("Failed to insert signer state"); - db.insert_signer_state(11, &state_1) + db.insert_encrypted_signer_state(11, &state_1) .expect("Failed to insert signer state"); assert_eq!( - db.get_signer_state(10) + db.get_encrypted_signer_state(10) .expect("Failed to get signer state") - .unwrap() - .id, - state_0.id + .unwrap(), + state_0 ); assert_eq!( - db.get_signer_state(11) + db.get_encrypted_signer_state(11) .expect("Failed to get signer state") - .unwrap() - .id, - state_1.id + .unwrap(), + state_1 ); assert!(db - .get_signer_state(12) + .get_encrypted_signer_state(12) .expect("Failed to get signer state") .is_none()); assert!(db - .get_signer_state(9) + .get_encrypted_signer_state(9) .expect("Failed to get signer state") .is_none()); } + + #[test] + fn test_has_pending_blocks() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.burn_height = 2; + }); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + + block_info_1.valid = Some(true); + + db.insert_block(&block_info_1) + .expect("Unable to update block in db"); + + assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + + block_info_2.valid = Some(true); + + db.insert_block(&block_info_2) + .expect("Unable to update block in db"); + + assert!(!db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + } + + #[test] + fn test_sqlite_version() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + assert_eq!( + query_row(&db.db, "SELECT sqlite_version()", []).unwrap(), + Some("3.45.0".to_string()) + ); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs new file mode 100644 index 00000000000..d0c7f1d9f3f --- /dev/null +++ b/stacks-signer/src/tests/chainstate.rs @@ -0,0 +1,466 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
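+//! Unit tests for the sortition-view proposal checks. The shared setup builds +//! two consecutive sortitions won by the same miner key (`cur_sortition` with +//! consensus hash 0x01.., `last_sortition` with 0x00..); each test then +//! perturbs one input -- the miner key, reorg timing, miner status, +//! tenure-extend payload, or proposal age -- and asserts how `check_proposal` +//! responds.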
+ +use std::fs; +use std::net::{Ipv4Addr, SocketAddrV4}; +use std::time::{Duration, SystemTime}; + +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::{ + CoinbasePayload, SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, + TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionPublicKeyEncoding, + TransactionSpendingCondition, TransactionVersion, +}; +use blockstack_lib::net::api::get_tenures_fork_info::TenureForkingInfo; +use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; +use clarity::util::vrf::VRFProof; +use libsigner::BlockProposal; +use slog::slog_info; +use stacks_common::bitvec::BitVec; +use stacks_common::info; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, +}; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::chainstate::{ + ProposalEvalConfig, SignerChainstateError, SortitionMinerStatus, SortitionState, SortitionsView, +}; +use crate::client::tests::MockServerClient; +use crate::client::StacksClient; +use crate::signerdb::{BlockInfo, SignerDb}; + +fn setup_test_environment( + fn_name: &str, +) -> ( + StacksClient, + SignerDb, + StacksPublicKey, + SortitionsView, + NakamotoBlock, +) { + let block_sk = StacksPrivateKey::from_seed(&[0, 1]); + let block_pk = StacksPublicKey::from_private(&block_sk); + let block_pkh = Hash160::from_node_public_key(&block_pk); + + let cur_sortition = SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 2, + burn_block_hash: BurnchainHeaderHash([1; 32]), + }; + + let last_sortition = Some(SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([128; 20]), + parent_tenure_id: ConsensusHash([128; 20]), + consensus_hash: ConsensusHash([0; 20]), + miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 1, + burn_block_hash: BurnchainHeaderHash([0; 32]), + }); + + let view = SortitionsView { + latest_consensus_hash: cur_sortition.consensus_hash, + cur_sortition, + last_sortition, + config: ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(5), + }, + }; + + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), + SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).into(), + "FOO".into(), + false, + ); + + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!("{signer_db_dir}/{fn_name}.{}.sqlite", get_epoch_time_secs()); + fs::create_dir_all(signer_db_dir).unwrap(); + let signer_db = SignerDb::new(signer_db_path).unwrap(); + + let block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([15; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 3, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }; + + (stacks_client, signer_db, 
block_pk, view, block) +} + +#[test] +fn check_proposal_units() { + let (stacks_client, signer_db, block_pk, mut view, block) = + setup_test_environment("check_proposal_units"); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); + + view.last_sortition = None; + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); +} + +#[test] +fn check_proposal_miner_pkh_mismatch() { + let (stacks_client, signer_db, _block_pk, mut view, mut block) = + setup_test_environment("miner_pkh_mismatch"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); +} + +fn reorg_timing_testing( + test_name: &str, + first_proposal_burn_block_timing_secs: u64, + sortition_timing_secs: u64, +) -> Result { + let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = + setup_test_environment(test_name); + view.config.first_proposal_burn_block_timing = + Duration::from_secs(first_proposal_burn_block_timing_secs); + + view.cur_sortition.parent_tenure_id = view.last_sortition.as_ref().unwrap().parent_tenure_id; + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.txs.push(StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: view.cur_sortition.consensus_hash, + prev_tenure_consensus_hash: view.cur_sortition.parent_tenure_id, + burn_view_consensus_hash: view.cur_sortition.consensus_hash, + previous_tenure_end: block.header.parent_block_id, + previous_tenure_blocks: 10, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&block_pk), + }), + )); + block.txs.push(StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(VRFProof::empty())), + )); + + let last_sortition = view.last_sortition.as_ref().unwrap(); + + let expected_result = vec![ + TenureForkingInfo { + burn_block_hash: last_sortition.burn_block_hash, + burn_block_height: 2, + sortition_id: SortitionId([2; 32]), + parent_sortition_id: SortitionId([1; 32]), + consensus_hash: last_sortition.consensus_hash, + was_sortition: true, + first_block_mined: Some(StacksBlockId([1; 32])), + }, + TenureForkingInfo { + burn_block_hash: BurnchainHeaderHash([128; 32]), + burn_block_height: 1, + sortition_id: SortitionId([1; 32]), + parent_sortition_id: SortitionId([0; 32]), + consensus_hash: view.cur_sortition.parent_tenure_id, + was_sortition: true, + first_block_mined: Some(StacksBlockId([2; 32])), + }, + ]; + + let block_proposal_1 = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: last_sortition.consensus_hash, + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + 
pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; + let mut block_info_1 = BlockInfo::from(block_proposal_1); + block_info_1.mark_signed_and_valid(); + signer_db.insert_block(&block_info_1).unwrap(); + + let sortition_time = SystemTime::UNIX_EPOCH + + Duration::from_secs(block_info_1.proposed_time + sortition_timing_secs); + signer_db + .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time) + .unwrap(); + + let MockServerClient { server, client, .. } = MockServerClient::new(); + let h = std::thread::spawn(move || view.check_proposal(&client, &signer_db, &block, &block_pk)); + + crate::client::tests::write_response( + server, + format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), + ); + + let result = h.join().unwrap(); + info!("Result: {result:?}"); + result +} + +#[test] +fn check_proposal_reorg_timing_bad() { + let result = reorg_timing_testing("reorg_timing_bad", 30, 31); + assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); +} + +#[test] +fn check_proposal_reorg_timing_ok() { + let result = reorg_timing_testing("reorg_timing_okay", 30, 30); + assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); +} + +#[test] +fn check_proposal_invalid_status() { + let (stacks_client, signer_db, block_pk, mut view, mut block) = + setup_test_environment("invalid_status"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + // this block passes the signer state checks, even though it doesn't have a tenure change tx. 
+ // this is because the signer state does not perform the tenure change logic checks: it needs + // the stacks-node to do that (because the stacks-node actually knows whether or not their + // parent blocks have been seen before, while the signer state checks are only reasoning about + // stacks blocks seen by the signer, which may be a subset) + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} + +fn make_tenure_change_payload() -> TenureChangePayload { + TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0; 20]), + prev_tenure_consensus_hash: ConsensusHash([0; 20]), + burn_view_consensus_hash: ConsensusHash([0; 20]), + previous_tenure_end: StacksBlockId([0; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::Extended, + pubkey_hash: Hash160([0; 20]), + } +} + +fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(payload), + } +} + +#[test] +fn check_proposal_tenure_extend_invalid_conditions() { + let (stacks_client, signer_db, block_pk, mut view, mut block) = + setup_test_environment("tenure_extend"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = view.cur_sortition.consensus_hash; + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = ConsensusHash([64; 20]); + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} + +#[test] +fn check_block_proposal_timeout() { + let (stacks_client, mut signer_db, block_pk, mut view, mut curr_sortition_block) = + setup_test_environment("block_proposal_timeout"); + curr_sortition_block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut last_sortition_block = curr_sortition_block.clone(); + last_sortition_block.header.consensus_hash = + view.last_sortition.as_ref().unwrap().consensus_hash; + + // Ensure we have a burn height to compare against + let burn_hash = view.cur_sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); + + // 
Sleep a bit to time out the block proposal + std::thread::sleep(Duration::from_secs(5)); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); +} + +#[test] +fn check_sortition_timeout() { + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!( + "{signer_db_dir}/sortition_timeout.{}.sqlite", + get_epoch_time_secs() + ); + fs::create_dir_all(signer_db_dir).unwrap(); + let mut signer_db = SignerDb::new(signer_db_path).unwrap(); + + let mut sortition = SortitionState { + miner_pkh: Hash160([0; 20]), + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 2, + burn_block_hash: BurnchainHeaderHash([1; 32]), + }; + // Ensure we have a burn height to compare against + let burn_hash = sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + std::thread::sleep(Duration::from_secs(1)); + // We have not yet timed out + assert!(!sortition + .is_timed_out(Duration::from_secs(10), &signer_db) + .unwrap()); + // We are a valid sortition, have an empty tenure, and have now timed out + assert!(sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + // This will not be marked as timed out as the status is no longer valid + sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + + // Revert the status to continue other checks + sortition.miner_status = SortitionMinerStatus::Valid; + // Insert a signed over block so its no longer an empty tenure + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: sortition.consensus_hash, + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; + + let mut block_info = BlockInfo::from(block_proposal); + block_info.signed_over = true; + signer_db.insert_block(&block_info).unwrap(); + + // This will no longer be timed out as we have a non-empty tenure + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); +} diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 32183e0e797..19002c1914d 100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -4,3 +4,4 @@ endpoint = "localhost:30000" network = "testnet" auth_password = "12345" db_path = ":memory:" +metrics_endpoint = "0.0.0.0:9090" \ No newline at end of file diff --git a/stacks-signer/src/tests/mod.rs b/stacks-signer/src/tests/mod.rs new file mode 100644 index 00000000000..a92c85da71b --- /dev/null +++ b/stacks-signer/src/tests/mod.rs @@ -0,0 +1 @@ +mod chainstate; diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs new file mode 100644 index 00000000000..520fb36ca1c --- /dev/null 
+++ b/stacks-signer/src/v0/mod.rs @@ -0,0 +1,25 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +/// The signer module for processing events +pub mod signer; + +use libsigner::v0::messages::SignerMessage; + +use crate::v0::signer::Signer; + +/// A v0 spawned signer +pub type SpawnedSigner = crate::SpawnedSigner<Signer, SignerMessage>; diff --git a/stacks-signer/src/v0/signer.rs new file mode 100644 index 00000000000..c32af06f3fd --- /dev/null +++ b/stacks-signer/src/v0/signer.rs @@ -0,0 +1,513 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
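+//! The v0 signer event loop in outline (see `process_event` below): a miner's +//! `BlockProposal` is first checked against the local sortition view; +//! proposals that fail are rejected immediately, otherwise the block is +//! submitted to the stacks-node for validation, and the resulting +//! accept/reject `BlockResponse` is written back to the signers' StackerDB. +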
+use std::fmt::Debug; +use std::sync::mpsc::Sender; + +use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use clarity::types::chainstate::StacksPrivateKey; +use clarity::types::{PrivateKey, StacksEpochId}; +use clarity::util::hash::MerkleHashFunc; +use clarity::util::secp256k1::Secp256k1PublicKey; +use libsigner::v0::messages::{ + BlockResponse, MessageSlotID, MockSignature, RejectCode, SignerMessage, +}; +use libsigner::{BlockProposal, SignerEvent}; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::{debug, error, info, warn}; + +use crate::chainstate::{ProposalEvalConfig, SortitionsView}; +use crate::client::{SignerSlotID, StackerDB, StacksClient}; +use crate::config::SignerConfig; +use crate::runloop::{RunLoopCommand, SignerResult}; +use crate::signerdb::{BlockInfo, SignerDb}; +use crate::Signer as SignerTrait; + +/// The stacks signer registered for the reward cycle +#[derive(Debug)] +pub struct Signer { + /// The private key of the signer + private_key: StacksPrivateKey, + /// The stackerdb client + pub stackerdb: StackerDB, + /// Whether the signer is a mainnet signer or not + pub mainnet: bool, + /// The signer id + pub signer_id: u32, + /// The signer slot ids for the signers in the reward cycle + pub signer_slot_ids: Vec<SignerSlotID>, + /// The addresses of other signers + pub signer_addresses: Vec<StacksAddress>, + /// The reward cycle this signer belongs to + pub reward_cycle: u64, + /// SignerDB for state management + pub signer_db: SignerDb, + /// Configuration for proposal evaluation + pub proposal_config: ProposalEvalConfig, +} + +impl std::fmt::Display for Signer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Cycle #{} Signer #{}", self.reward_cycle, self.signer_id,) + } +} + +impl SignerTrait<SignerMessage> for Signer { + /// Create a new signer from the given configuration + fn new(config: SignerConfig) -> Self { + Self::from(config) + } + + /// Return the reward cycle of the signer + fn reward_cycle(&self) -> u64 { + self.reward_cycle + } + + /// Process the event + fn process_event( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option<SortitionsView>, + event: Option<&SignerEvent<SignerMessage>>, + _res: Sender<Vec<SignerResult>>, + current_reward_cycle: u64, + ) { + let event_parity = match event { + // Block proposal events do have reward cycles, but each proposal has its own cycle, + // and the vec could be heterogeneous, so don't differentiate. + Some(SignerEvent::BlockValidationResponse(_)) + | Some(SignerEvent::MinerMessages(..)) + | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::StatusCheck) + | None => None, + Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), + }; + let other_signer_parity = (self.reward_cycle + 1) % 2; + if event_parity == Some(other_signer_parity) { + return; + } + debug!("{self}: Processing event: {event:?}"); + let Some(event) = event else { + // No event. Do nothing. + debug!("{self}: No event received"); + return; + }; + match event { + SignerEvent::BlockValidationResponse(block_validate_response) => { + debug!("{self}: Received a block proposal result from the stacks node..."); + self.handle_block_validate_response(block_validate_response) + } + SignerEvent::SignerMessages(_signer_set, messages) => { + debug!( + "{self}: Received {} messages from the other signers. 
Ignoring...", + messages.len() + ); + } + SignerEvent::MinerMessages(messages, miner_pubkey) => { + debug!( + "{self}: Received {} messages from the miner", + messages.len(); + ); + for message in messages { + match message { + SignerMessage::BlockProposal(block_proposal) => { + self.handle_block_proposal( + stacks_client, + sortition_state, + block_proposal, + miner_pubkey, + ); + } + SignerMessage::BlockPushed(b) => { + let block_push_result = stacks_client.post_block(b); + info!( + "{self}: Got block pushed message"; + "block_id" => %b.block_id(), + "signer_sighash" => %b.header.signer_signature_hash(), + "push_result" => ?block_push_result, + ); + } + _ => {} + } + } + } + SignerEvent::StatusCheck => { + debug!("{self}: Received a status check event."); + } + SignerEvent::NewBurnBlock { + burn_height, + burn_header_hash, + received_time, + } => { + info!("{self}: Received a new burn block event for block height {burn_height}"); + if let Err(e) = + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + { + warn!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + } + *sortition_state = None; + let epoch = match stacks_client.get_node_epoch() { + Ok(epoch) => epoch, + Err(e) => { + warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); + return; + } + }; + debug!("{self}: Epoch 2.5 signer received a new burn block event."; + "burn_height" => burn_height, + "current_reward_cycle" => current_reward_cycle, + "epoch" => ?epoch + ); + if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + self.mock_sign(*burn_height, stacks_client); + } + } + } + } + + fn process_command( + &mut self, + _stacks_client: &StacksClient, + _current_reward_cycle: u64, + command: Option, + ) { + if let Some(command) = command { + warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. 
Ignoring...") + } + } + + fn has_pending_blocks(&self) -> bool { + self.signer_db + .has_pending_blocks(self.reward_cycle) + .unwrap_or_else(|e| { + error!("{self}: Failed to check for pending blocks: {e:?}",); + // Assume we have pending blocks to prevent premature cleanup + true + }) + } +} + +impl From for Signer { + fn from(signer_config: SignerConfig) -> Self { + let stackerdb = StackerDB::from(&signer_config); + debug!( + "Reward cycle #{} Signer #{}", + signer_config.reward_cycle, signer_config.signer_id, + ); + let signer_db = + SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); + let proposal_config = ProposalEvalConfig::from(&signer_config); + Self { + private_key: signer_config.stacks_private_key, + stackerdb, + mainnet: signer_config.mainnet, + signer_id: signer_config.signer_id, + signer_addresses: signer_config + .signer_entries + .signer_ids + .into_keys() + .collect(), + signer_slot_ids: signer_config.signer_slot_ids.clone(), + reward_cycle: signer_config.reward_cycle, + signer_db, + proposal_config, + } + } +} + +impl Signer { + /// Determine this signers response to a proposed block + /// Returns a BlockResponse if we have already validated the block + /// Returns None otherwise + fn determine_response(&self, block_info: &BlockInfo) -> Option { + let valid = block_info.valid?; + let response = if valid { + debug!("{self}: Accepting block {}", block_info.block.block_id()); + let signature = self + .private_key + .sign(block_info.signer_signature_hash().bits()) + .expect("Failed to sign block"); + BlockResponse::accepted(block_info.signer_signature_hash(), signature) + } else { + debug!("{self}: Rejecting block {}", block_info.block.block_id()); + BlockResponse::rejected( + block_info.signer_signature_hash(), + RejectCode::RejectedInPriorRound, + ) + }; + Some(response) + } + + /// Handle block proposal messages submitted to signers stackerdb + fn handle_block_proposal( + &mut self, + stacks_client: &StacksClient, + sortition_state: &mut Option, + block_proposal: &BlockProposal, + miner_pubkey: &Secp256k1PublicKey, + ) { + debug!("{self}: Received a block proposal: {block_proposal:?}"); + if block_proposal.reward_cycle != self.reward_cycle { + // We are not signing for this reward cycle. Ignore the block. + debug!( + "{self}: Received a block proposal for a different reward cycle. Ignore it."; + "requested_reward_cycle" => block_proposal.reward_cycle + ); + return; + } + // TODO: should add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block + let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); + if let Some(block_info) = self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + .expect("Failed to connect to signer DB") + { + let Some(block_response) = self.determine_response(&block_info) else { + // We are still waiting for a response for this block. Do nothing. 
+ debug!("{self}: Received a block proposal for a block we are already validating."; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id() + ); + return; + }; + // Submit a proposal response to the .signers contract for miners + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + if let Err(e) = self + .stackerdb + .send_message_with_retry::(block_response.into()) + { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } + return; + } + + info!( + "{self}: received a block proposal for a new block. Submit block for validation. "; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + crate::monitoring::increment_block_proposals_received(); + let mut block_info = BlockInfo::from(block_proposal.clone()); + + // Get sortition view if we don't have it + if sortition_state.is_none() { + *sortition_state = + SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client) + .inspect_err(|e| { + warn!( + "{self}: Failed to update sortition view: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ) + }) + .ok(); + } + + // Check if proposal can be rejected now if not valid against sortition view + let block_response = if let Some(sortition_state) = sortition_state { + match sortition_state.check_proposal( + stacks_client, + &self.signer_db, + &block_proposal.block, + miner_pubkey, + ) { + // Error validating block + Err(e) => { + warn!( + "{self}: Error checking block proposal: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ConnectivityIssues, + )) + } + // Block proposal is bad + Ok(false) => { + warn!( + "{self}: Block proposal invalid"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::SortitionViewMismatch, + )) + } + // Block proposal passed check, still don't know if valid + Ok(true) => None, + } + } else { + warn!( + "{self}: Cannot validate block, no sortition view"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::NoSortitionView, + )) + }; + + if let Some(block_response) = block_response { + // We know proposal is invalid. 
Send rejection message, do not do further validation + block_info.valid = Some(false); + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + let res = self + .stackerdb + .send_message_with_retry::(block_response.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + } else { + // We don't know if proposal is valid, submit to stacks-node for further checks + stacks_client + .submit_block_for_validation(block_info.block.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to submit block for validation: {e:?}"); + }); + } + + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } + + /// Handle the block validate response returned from our prior calls to submit a block for validation + fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { + info!("{self}: Received a block validate response: {block_validate_response:?}"); + let (response, block_info) = match block_validate_response { + BlockValidateResponse::Ok(block_validate_ok) => { + crate::monitoring::increment_block_validation_responses(true); + let signer_signature_hash = block_validate_ok.signer_signature_hash; + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + return; + } + }; + block_info.mark_signed_and_valid(); + let signature = self + .private_key + .sign(&signer_signature_hash.0) + .expect("Failed to sign block"); + ( + BlockResponse::accepted(signer_signature_hash, signature), + block_info, + ) + } + BlockValidateResponse::Reject(block_validate_reject) => { + crate::monitoring::increment_block_validation_responses(false); + let signer_signature_hash = block_validate_reject.signer_signature_hash; + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}"); + return; + } + }; + block_info.valid = Some(false); + ( + BlockResponse::from(block_validate_reject.clone()), + block_info, + ) + } + }; + // Submit a proposal response to the .signers contract for miners + info!( + "{self}: Broadcasting a block response to stacks node: {response:?}"; + "signer_sighash" => %block_info.signer_signature_hash(), + ); + match self + .stackerdb + .send_message_with_retry::(response.clone().into()) + { + Ok(_) => { + let accepted = matches!(response, BlockResponse::Accepted(..)); + crate::monitoring::increment_block_responses_sent(accepted); + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } + + /// Send a mock signature to stackerdb to prove we are still alive + fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { + let Ok(peer_info) = stacks_client.get_peer_info() else { + warn!("{self}: Failed to get peer info. Cannot mock sign."); + return; + }; + let chain_id = if self.mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + info!("Mock signing for burn block {burn_block_height:?}"; + "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_info.stacks_tip.clone(), + "peer_burn_block_height" => peer_info.burn_block_height, + "pox_consensus" => ?peer_info.pox_consensus.clone(), + "server_version" => peer_info.server_version.clone(), + "chain_id" => chain_id + ); + let mock_signature = + MockSignature::new(burn_block_height, peer_info, chain_id, &self.private_key); + let message = SignerMessage::MockSignature(mock_signature); + if let Err(e) = self + .stackerdb + .send_message_with_retry::(message) + { + warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); + } + } +} diff --git a/stacks-signer/src/coordinator.rs b/stacks-signer/src/v1/coordinator.rs similarity index 95% rename from stacks-signer/src/coordinator.rs rename to stacks-signer/src/v1/coordinator.rs index 7469c0ff184..7fc2d238c48 100644 --- a/stacks-signer/src/coordinator.rs +++ b/stacks-signer/src/v1/coordinator.rs @@ -91,17 +91,10 @@ impl CoordinatorSelector { } } new_index + } else if ROTATE_COORDINATORS { + self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() } else { - if ROTATE_COORDINATORS { - let mut new_index = self.coordinator_index.saturating_add(1); - if new_index == self.coordinator_ids.len() { - // We have exhausted all potential coordinators. Go back to the start - new_index = 0; - } - new_index - } else { - self.coordinator_index - } + self.coordinator_index }; self.coordinator_id = *self .coordinator_ids diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs new file mode 100644 index 00000000000..ed1d9800165 --- /dev/null +++ b/stacks-signer/src/v1/mod.rs @@ -0,0 +1,29 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libsigner::v1::messages::SignerMessage; + +use crate::v1::signer::Signer; + +/// The coordinator selector for the signer +pub mod coordinator; +/// The signer module for processing events +pub mod signer; +/// The stackerdb module for sending messages between signers and miners +pub mod stackerdb_manager; + +/// A v1 spawned signer +pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/stacks-signer/src/signer.rs b/stacks-signer/src/v1/signer.rs similarity index 60% rename from stacks-signer/src/signer.rs rename to stacks-signer/src/v1/signer.rs index 4d23a92c074..8212586beb7 100644 --- a/stacks-signer/src/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::VecDeque; +use std::fmt::Debug; use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::Instant; @@ -24,21 +25,23 @@ use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use blockstack_lib::util_lib::db::Error as DBError; use hashbrown::HashSet; -use libsigner::{ - BlockProposalSigners, BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerEvent, - SignerMessage, +use libsigner::v1::messages::{ + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; -use serde_derive::{Deserialize, Serialize}; +use libsigner::{BlockProposal, SignerEvent}; +use rand_core::OsRng; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::{debug, error, info, warn}; -use wsts::common::{MerkleRoot, Signature}; +use wsts::common::Signature; use wsts::curve::keys::PublicKey; use wsts::curve::point::Point; +use wsts::curve::scalar::Scalar; use wsts::net::{Message, NonceRequest, Packet, SignatureShareRequest}; use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; use wsts::state_machine::coordinator::{ @@ -49,91 +52,37 @@ use wsts::state_machine::{OperationResult, SignError}; use wsts::traits::Signer as _; use wsts::v2; -use crate::client::{retry_with_exponential_backoff, ClientError, StackerDB, StacksClient}; +use super::stackerdb_manager::StackerDBManager; +use crate::chainstate::SortitionsView; +use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; -use crate::coordinator::CoordinatorSelector; -use crate::signerdb::SignerDb; +use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; +use crate::signerdb::{BlockInfo, SignerDb}; +use crate::v1::coordinator::CoordinatorSelector; +use crate::Signer as SignerTrait; -/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID -#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, 
Ord)] -pub struct SignerSlotID(pub u32); - -impl std::fmt::Display for SignerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -/// Additional Info about a proposed block -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct BlockInfo { - /// The block we are considering - pub block: NakamotoBlock, - /// Our vote on the block if we have one yet - pub vote: Option, - /// Whether the block contents are valid - valid: Option, - /// The associated packet nonce request if we have one - nonce_request: Option, - /// Whether this block is already being signed over - pub signed_over: bool, -} - -impl BlockInfo { - /// Create a new BlockInfo - pub const fn new(block: NakamotoBlock) -> Self { - Self { - block, - vote: None, - valid: None, - nonce_request: None, - signed_over: false, - } - } - - /// Create a new BlockInfo with an associated nonce request packet - pub const fn new_with_request(block: NakamotoBlock, nonce_request: NonceRequest) -> Self { - Self { - block, - vote: None, - valid: None, - nonce_request: Some(nonce_request), - signed_over: true, - } - } - - /// Return the block's signer signature hash - pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { - self.block.header.signer_signature_hash() - } -} - -/// Which signer operation to perform -#[derive(PartialEq, Clone, Debug)] -pub enum Command { - /// Generate a DKG aggregate public key +/// The specific operations that a signer can perform +#[derive(PartialEq, Eq, Debug, Clone)] +pub enum Operation { + /// A DKG operation Dkg, - /// Sign a message - Sign { - /// The block to sign over - block: NakamotoBlock, - /// Whether to make a taproot signature - is_taproot: bool, - /// Taproot merkle root - merkle_root: Option, - }, + /// A Sign operation + Sign, } /// The Signer state #[derive(PartialEq, Eq, Debug, Clone)] pub enum State { + /// The signer is uninitialized and should read stackerdb to restore state + Uninitialized, /// The signer is idle, waiting for messages and commands Idle, /// The signer is executing a DKG or Sign round - OperationInProgress, + OperationInProgress(Operation), } /// The stacks signer registered for the reward cycle +#[derive(Debug)] pub struct Signer { /// The coordinator for inbound messages for a specific reward cycle pub coordinator: FireCoordinator, @@ -142,9 +91,9 @@ pub struct Signer { /// the state of the signer pub state: State, /// Received Commands that need to be processed - pub commands: VecDeque, - /// The stackerdb client - pub stackerdb: StackerDB, + pub commands: VecDeque, + /// The stackerdb client session manager + pub stackerdb_manager: StackerDBManager, /// Whether the signer is a mainnet signer or not pub mainnet: bool, /// The signer id @@ -159,8 +108,11 @@ pub struct Signer { pub next_signer_addresses: Vec, /// The reward cycle this signer belongs to pub reward_cycle: u64, - /// The tx fee in uSTX to use if the epoch is pre Nakamoto (Epoch 3.0) + /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). pub tx_fee_ustx: u64, + /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) + /// If None, will not cap the fee. 
+    pub max_tx_fee_ustx: Option<u64>,
     /// The coordinator info for the signer
     pub coordinator_selector: CoordinatorSelector,
     /// The approved key registered to the contract
@@ -185,10 +137,192 @@ impl std::fmt::Display for Signer {
     }
 }
 
+impl SignerTrait<SignerMessage> for Signer {
+    /// Create a new signer from the given configuration
+    fn new(config: SignerConfig) -> Self {
+        Self::from(config)
+    }
+
+    /// Return the reward cycle of the signer
+    fn reward_cycle(&self) -> u64 {
+        self.reward_cycle
+    }
+
+    /// Process the event
+    fn process_event(
+        &mut self,
+        stacks_client: &StacksClient,
+        _sortition_state: &mut Option<SortitionsView>,
+        event: Option<&SignerEvent>,
+        res: Sender<Vec<SignerResult>>,
+        current_reward_cycle: u64,
+    ) {
+        let event_parity = match event {
+            Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2),
+            // Block proposal events do have reward cycles, but each proposal has its own cycle,
+            // and the vec could be heterogeneous, so, don't differentiate.
+            Some(SignerEvent::MinerMessages(..))
+            | Some(SignerEvent::NewBurnBlock { .. })
+            | Some(SignerEvent::StatusCheck)
+            | None => None,
+            Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2),
+        };
+        let other_signer_parity = (self.reward_cycle + 1) % 2;
+        if event_parity == Some(other_signer_parity) {
+            return;
+        }
+        if self.approved_aggregate_public_key.is_none() {
+            if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) {
+                error!("{self}: failed to refresh DKG: {e}");
+            }
+        }
+        self.refresh_coordinator();
+        debug!("{self}: Processing event: {event:?}");
+        let Some(event) = event else {
+            // No event. Do nothing.
+            debug!("{self}: No event received");
+            return;
+        };
+        match event {
+            SignerEvent::BlockValidationResponse(block_validate_response) => {
+                info!("{self}: Received a block proposal result from the stacks node...");
+                self.handle_block_validate_response(
+                    stacks_client,
+                    block_validate_response,
+                    res,
+                    current_reward_cycle,
+                )
+            }
+            SignerEvent::SignerMessages(signer_set, messages) => {
+                if *signer_set != self.stackerdb_manager.get_signer_set() {
+                    debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring...");
+                    return;
+                }
+                debug!(
+                    "{self}: Received {} messages from the other signers...",
+                    messages.len()
+                );
+                self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
+            }
+            SignerEvent::MinerMessages(messages, miner_key) => {
+                let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice())
+                    .expect("FATAL: could not convert from StacksPublicKey to PublicKey");
+                self.miner_key = Some(miner_key);
+                if current_reward_cycle != self.reward_cycle {
+                    // There is no point in processing blocks if we are not in the current reward cycle (we can never actually contribute to signing these blocks)
+                    debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring...");
+                    return;
+                }
+                debug!(
+                    "{self}: Received {} messages from the miner",
+                    messages.len();
+                    "miner_key" => ?miner_key,
+                );
+                self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
+            }
+            SignerEvent::StatusCheck => {
+                debug!("{self}: Received a status check event.")
+            }
+            SignerEvent::NewBurnBlock {
+                burn_height,
+                burn_header_hash,
+                received_time,
+            } => {
+                info!("{self}: Received a new burn block event for block height {burn_height}");
+                if let Err(e) =
+                    self.signer_db
+                        .insert_burn_block(burn_header_hash, *burn_height, received_time)
+                {
+                    warn!(
+                        "Failed to write burn block event to signerdb";
+                        "err" => ?e,
+                        "burn_header_hash" => %burn_header_hash,
+                        "burn_height" => burn_height
+                    );
+                }
+            }
+        }
+    }
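The parity check at the top of `process_event` is what lets two signer instances, one registered for the current reward cycle and one for the next, share a single event stream without processing each other's traffic: cycle-tagged events are routed by the parity of their reward cycle, while untagged events (status checks, burn blocks) reach both. A small sketch of the routing rule; the function is hypothetical and only restates the logic above:

```rust
/// Hypothetical restatement of the reward-cycle parity filter. Events that
/// carry a reward-cycle parity belong to the signer whose own cycle has the
/// same parity; events without one are delivered to every signer instance.
fn event_is_for_signer(event_cycle_parity: Option<u64>, signer_reward_cycle: u64) -> bool {
    match event_cycle_parity {
        Some(parity) => parity % 2 == signer_reward_cycle % 2,
        None => true,
    }
}
```

Since at most two signer instances (consecutive reward cycles) are ever live at once, a single parity bit is enough to disambiguate them.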
Ignoring..."); + return; + } + debug!( + "{self}: Received {} messages from the miner", + messages.len(); + "miner_key" => ?miner_key, + ); + self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); + } + SignerEvent::StatusCheck => { + debug!("{self}: Received a status check event.") + } + SignerEvent::NewBurnBlock { + burn_height, + burn_header_hash, + received_time, + } => { + info!("{self}: Received a new burn block event for block height {burn_height}"); + if let Err(e) = + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + { + warn!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + } + } + } + } + + fn process_command( + &mut self, + stacks_client: &StacksClient, + current_reward_cycle: u64, + command: Option, + ) { + if let Some(command) = command { + let reward_cycle = command.reward_cycle; + if self.reward_cycle != reward_cycle { + warn!( + "{self}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}" + ); + } else { + info!( + "{self}: Queuing an external runloop command ({:?}): {command:?}", + self.state_machine.public_keys.signers.get(&self.signer_id) + ); + self.commands.push_back(command.command); + } + } + self.process_next_command(stacks_client, current_reward_cycle); + } + + fn has_pending_blocks(&self) -> bool { + self.signer_db + .has_pending_blocks(self.reward_cycle) + .unwrap_or_else(|e| { + error!("{self}: Failed to check if there are pending blocks: {e:?}"); + // Assume there are pending blocks to prevent premature cleanup + true + }) + } +} + impl Signer { - /// Return the current coordinator. If in the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - fn get_coordinator(&self, current_reward_cycle: u64) -> (Option, PublicKey) { + /// Attempt to process the next command in the queue, and update state accordingly + fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) { + match &self.state { + State::Uninitialized => { + // We cannot process any commands until we have restored our state + warn!("{self}: Cannot process commands until state is restored. Waiting..."); + } + State::Idle => { + let Some(command) = self.commands.front() else { + debug!("{self}: Nothing to process. Waiting for command..."); + return; + }; + let coordinator_id = if matches!(command, SignerCommand::Dkg) { + // We cannot execute a DKG command if we are not the coordinator + Some(self.get_coordinator_dkg().0) + } else { + self.get_coordinator_sign(current_reward_cycle).0 + }; + if coordinator_id != Some(self.signer_id) { + debug!( + "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", + ); + return; + } + let command = self + .commands + .pop_front() + .expect("BUG: Already asserted that the command queue was not empty"); + self.execute_command(stacks_client, &command); + } + State::OperationInProgress(op) => { + // We cannot execute the next command until the current one is finished... + debug!( + "{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}", + self.coordinator.state + ); + } + } + } + /// Return the current coordinator. + /// If the current reward cycle is the active reward cycle, this is the miner, + /// so the first element of the tuple will be None (because the miner does not have a signer index). 
+ /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. + fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { if self.reward_cycle == current_reward_cycle { let Some(ref cur_miner) = self.miner_key else { error!( @@ -199,17 +339,72 @@ impl Signer { return (Some(selected.0), selected.1); }; // coordinator is the current miner. - (None, cur_miner.clone()) + (None, *cur_miner) } else { let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); + (Some(selected.0), selected.1) } } + + /// Refresh the next signer data from the given configuration data + #[allow(dead_code)] + fn update_signer(&mut self, new_signer_config: &SignerConfig) { + self.next_signer_addresses = new_signer_config + .signer_entries + .signer_ids + .keys() + .copied() + .collect(); + self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); + } + + /// Get the current coordinator for executing DKG + /// This will always use the coordinator selector to determine the coordinator + fn get_coordinator_dkg(&self) -> (u32, PublicKey) { + self.coordinator_selector.get_coordinator() + } + + /// Read stackerdb messages in case the signer was started late or restarted and missed incoming DKG messages + pub fn read_dkg_stackerdb_messages( + &mut self, + stacks_client: &StacksClient, + res: Sender>, + current_reward_cycle: u64, + ) -> Result<(), ClientError> { + if self.state != State::Uninitialized { + // We should only read stackerdb if we are uninitialized + return Ok(()); + } + let ordered_packets = self + .stackerdb_manager + .get_dkg_packets(&self.signer_slot_ids)? + .iter() + .filter_map(|packet| { + let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { + self.get_coordinator_dkg().1 + } else { + debug!( + "{self}: Received a non-DKG message in the DKG message queue. Ignoring it." 
+ ); + return None; + }; + self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) + }) + .collect::>(); + // We successfully read stackerdb so we are no longer uninitialized + self.state = State::Idle; + debug!( + "{self}: Processing {} DKG messages from stackerdb: {ordered_packets:?}", + ordered_packets.len() + ); + self.handle_packets(stacks_client, res, &ordered_packets, current_reward_cycle); + Ok(()) + } } impl From for Signer { fn from(signer_config: SignerConfig) -> Self { - let stackerdb = StackerDB::from(&signer_config); + let mut stackerdb_manager = StackerDBManager::from(&signer_config); let num_signers = signer_config .signer_entries @@ -266,23 +461,26 @@ impl From for Signer { signer_config.signer_entries.public_keys, ); - if let Some(state) = signer_db - .get_signer_state(signer_config.reward_cycle) - .expect("Failed to load signer state") - { - debug!( - "Reward cycle #{} Signer #{}: Loading signer", - signer_config.reward_cycle, signer_config.signer_id - ); - state_machine.signer = v2::Signer::load(&state); - } + if let Some(state) = load_encrypted_signer_state( + &mut stackerdb_manager, + signer_config.signer_slot_id, + &state_machine.network_private_key, + ).or_else(|err| { + warn!("Failed to load encrypted signer state from StackerDB, falling back to SignerDB: {err}"); + load_encrypted_signer_state( + &signer_db, + signer_config.reward_cycle, + &state_machine.network_private_key) + }).expect("Failed to load encrypted signer state from both StackerDB and SignerDB") { + state_machine.signer = state; + }; Self { coordinator, state_machine, - state: State::Idle, + state: State::Uninitialized, commands: VecDeque::new(), - stackerdb, + stackerdb_manager, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, signer_addresses: signer_config @@ -295,6 +493,7 @@ impl From for Signer { next_signer_addresses: vec![], reward_cycle: signer_config.reward_cycle, tx_fee_ustx: signer_config.tx_fee_ustx, + max_tx_fee_ustx: signer_config.max_tx_fee_ustx, coordinator_selector, approved_aggregate_public_key: None, miner_key: None, @@ -332,24 +531,21 @@ impl Signer { } /// Update operation - fn update_operation(&mut self) { - self.state = State::OperationInProgress; + fn update_operation(&mut self, operation: Operation) { + self.state = State::OperationInProgress(operation); self.coordinator_selector.last_message_time = Some(Instant::now()); } /// Execute the given command and update state accordingly - fn execute_command(&mut self, stacks_client: &StacksClient, command: &Command) { + fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) { match command { - Command::Dkg => { + SignerCommand::Dkg => { + crate::monitoring::increment_commands_processed("dkg"); if self.approved_aggregate_public_key.is_some() { debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); return; } - let vote_round = match retry_with_exponential_backoff(|| { - stacks_client - .get_last_round(self.reward_cycle) - .map_err(backoff::Error::transient) - }) { + let vote_round = match stacks_client.get_last_round(self.reward_cycle) { Ok(last_round) => last_round, Err(e) => { error!("{self}: Unable to perform DKG. 
Failed to get last round from stacks node: {e:?}"); @@ -365,88 +561,64 @@ impl Signer { ); match self.coordinator.start_dkg_round() { Ok(msg) => { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); + self.update_operation(Operation::Dkg); } Err(e) => { error!("{self}: Failed to start DKG: {e:?}",); return; } } + self.update_operation(Operation::Dkg); } - Command::Sign { - block, + SignerCommand::Sign { + block_proposal, is_taproot, merkle_root, } => { + crate::monitoring::increment_commands_processed("sign"); if self.approved_aggregate_public_key.is_none() { debug!("{self}: Cannot sign a block without an approved aggregate public key. Ignore it."); return; } - let signer_signature_hash = block.header.signer_signature_hash(); + let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); let mut block_info = self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::new(block.clone()))) - .unwrap_or_else(|| BlockInfo::new(block.clone())); + .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) + .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); if block_info.signed_over { debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); return; } info!("{self}: Signing block"; - "block_consensus_hash" => %block.header.consensus_hash, - "block_height" => block.header.chain_length, - "pre_sign_block_id" => %block.block_id(), + "block_consensus_hash" => %block_proposal.block.header.consensus_hash, + "block_height" => block_proposal.block.header.chain_length, + "pre_sign_block_id" => %block_proposal.block.block_id(), ); match self.coordinator.start_signing_round( - &block.serialize_to_vec(), + &block_proposal.serialize_to_vec(), *is_taproot, *merkle_root, ) { Ok(msg) => { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); block_info.signed_over = true; self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|e| { error!("{self}: Failed to insert block in DB: {e:?}"); }); + self.update_operation(Operation::Sign); } Err(e) => { error!("{self}: Failed to start signing block: {e:?}",); return; } } - } - } - self.update_operation(); - } - - /// Attempt to process the next command in the queue, and update state accordingly - pub fn process_next_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - ) { - let coordinator_id = self.get_coordinator(current_reward_cycle).0; - match &self.state { - State::Idle => { - if coordinator_id != Some(self.signer_id) { - debug!( - "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", - ); - return; - } - if let Some(command) = self.commands.pop_front() { - self.execute_command(stacks_client, &command); - } else { - debug!("{self}: Nothing to process. Waiting for command...",); - } - } - State::OperationInProgress => { - // We cannot execute the next command until the current one is finished... - debug!("{self}: Waiting for coordinator {coordinator_id:?} operation to finish. 
Coordinator state = {:?}", self.coordinator.state); + self.update_operation(Operation::Sign); } } } @@ -456,12 +628,12 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ) { - let coordinator_id = self.get_coordinator(current_reward_cycle).0; let mut block_info = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { + crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self @@ -482,7 +654,7 @@ impl Signer { let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); block_info.valid = Some(is_valid); self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); info!( "{self}: Treating block validation for block {} as valid: {:?}", @@ -492,6 +664,7 @@ impl Signer { block_info } BlockValidateResponse::Reject(block_validate_reject) => { + crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; let mut block_info = match self .signer_db @@ -513,7 +686,7 @@ impl Signer { // to observe so they know to send another block and to prove signers are doing work); warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_validate_reject.clone().into()) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); @@ -521,7 +694,7 @@ impl Signer { block_info } }; - if let Some(mut nonce_request) = block_info.nonce_request.take() { + if let Some(mut nonce_request) = block_info.ext.take_nonce_request() { debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); // We have received validation from the stacks node. Determine our vote and update the request message self.determine_vote(&mut block_info, &mut nonce_request); @@ -531,34 +704,15 @@ impl Signer { sig: vec![], }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); - } else { - if block_info.valid.unwrap_or(false) - && !block_info.signed_over - && coordinator_id == Some(self.signer_id) - { - // We are the coordinator. 
Trigger a signing round for this block - debug!( - "{self}: attempt to trigger a signing round for block"; - "signer_sighash" => %block_info.block.header.signer_signature_hash(), - "block_hash" => %block_info.block.header.block_hash(), - ); - self.commands.push_back(Command::Sign { - block: block_info.block.clone(), - is_taproot: false, - merkle_root: None, - }); - } else { - debug!( - "{self}: ignoring block."; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - "coordinator_id" => coordinator_id, - ); - } } + info!( + "{self}: Received a block validate response"; + "block_hash" => block_info.block.header.block_hash(), + "valid" => block_info.valid, + "signed_over" => block_info.signed_over, + ); self.signer_db - .insert_block(self.reward_cycle, &block_info) + .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } @@ -566,19 +720,24 @@ impl Signer { fn handle_signer_messages( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: Sender>, messages: &[SignerMessage], current_reward_cycle: u64, ) { - let coordinator_pubkey = self.get_coordinator(current_reward_cycle).1; let packets: Vec = messages .iter() .filter_map(|msg| match msg { SignerMessage::DkgResults { .. } | SignerMessage::BlockResponse(_) + | SignerMessage::EncryptedSignerState(_) | SignerMessage::Transactions(_) => None, // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. SignerMessage::Packet(packet) => { + let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { + self.get_coordinator_dkg().1 + } else { + self.get_coordinator_sign(current_reward_cycle).1 + }; self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) } }) @@ -586,61 +745,17 @@ impl Signer { self.handle_packets(stacks_client, res, &packets, current_reward_cycle); } - /// Handle proposed blocks submitted by the miners to stackerdb - fn handle_proposed_blocks( - &mut self, - stacks_client: &StacksClient, - proposals: &[BlockProposalSigners], - ) { - for proposal in proposals { - if proposal.reward_cycle != self.reward_cycle { - debug!( - "{self}: Received proposal for block outside of my reward cycle, ignoring."; - "proposal_reward_cycle" => proposal.reward_cycle, - "proposal_burn_height" => proposal.burn_height, - ); - continue; - } - let sig_hash = proposal.block.header.signer_signature_hash(); - match self.signer_db.block_lookup(self.reward_cycle, &sig_hash) { - Ok(Some(block)) => { - debug!( - "{self}: Received proposal for block already known, ignoring new proposal."; - "signer_sighash" => %sig_hash, - "proposal_burn_height" => proposal.burn_height, - "vote" => ?block.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }), - "signed_over" => block.signed_over, - ); - continue; - } - Ok(None) => { - // Store the block in our cache - self.signer_db - .insert_block(self.reward_cycle, &BlockInfo::new(proposal.block.clone())) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - // Submit the block for validation - stacks_client - .submit_block_for_validation_with_retry(proposal.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); - } - Err(e) => { - error!( - "{self}: Failed to lookup block in DB: {e:?}. Dropping proposal request." 
-                );
-                continue;
-            }
-        }
-    }
+    /// Helper function for determining if the provided message is a DKG specific message
+    fn is_dkg_message(msg: &Message) -> bool {
+        matches!(
+            msg,
+            Message::DkgBegin(_)
+                | Message::DkgEnd(_)
+                | Message::DkgEndBegin(_)
+                | Message::DkgPrivateBegin(_)
+                | Message::DkgPrivateShares(_)
+                | Message::DkgPublicShares(_)
+        )
     }
 
     /// Process inbound packets as both a signer and a coordinator
@@ -648,10 +763,13 @@
     fn handle_packets(
         &mut self,
         stacks_client: &StacksClient,
-        res: Sender<Vec<OperationResult>>,
+        res: Sender<Vec<SignerResult>>,
         packets: &[Packet],
         current_reward_cycle: u64,
     ) {
+        if let Ok(packets_len) = packets.len().try_into() {
+            crate::monitoring::increment_inbound_packets(packets_len);
+        }
         let signer_outbound_messages = self
             .state_machine
            .process_inbound_messages(packets)
@@ -680,13 +798,36 @@
             self.process_operation_results(stacks_client, &operation_results);
             self.send_operation_results(res, operation_results);
             self.finish_operation();
-        } else if !packets.is_empty() && self.coordinator.state != CoordinatorState::Idle {
-            // We have received a message and are in the middle of an operation. Update our state accordingly
-            self.update_operation();
+        } else if !packets.is_empty() {
+            // We have received a message. Update our state accordingly
+            // Let us be extra explicit in case a new state type gets added to wsts' state machine
+            match &self.coordinator.state {
+                CoordinatorState::Idle => {}
+                CoordinatorState::DkgPublicDistribute
+                | CoordinatorState::DkgPublicGather
+                | CoordinatorState::DkgPrivateDistribute
+                | CoordinatorState::DkgPrivateGather
+                | CoordinatorState::DkgEndDistribute
+                | CoordinatorState::DkgEndGather => {
+                    self.update_operation(Operation::Dkg);
+                }
+                CoordinatorState::NonceRequest(_, _)
+                | CoordinatorState::NonceGather(_, _)
+                | CoordinatorState::SigShareRequest(_, _)
+                | CoordinatorState::SigShareGather(_, _) => {
+                    self.update_operation(Operation::Sign);
+                }
+            }
         }
-        debug!("{self}: Saving signer state");
-        self.save_signer_state();
+        if packets
+            .iter()
+            .any(|packet| matches!(packet.msg, Message::DkgEnd(_)))
+        {
+            debug!("{self}: Saving signer state");
+            self.save_signer_state()
+                .unwrap_or_else(|_| panic!("{self}: Failed to save signer state"));
+        }
         self.send_outbound_messages(signer_outbound_messages);
         self.send_outbound_messages(coordinator_outbound_messages);
     }
@@ -751,26 +892,35 @@
         stacks_client: &StacksClient,
         nonce_request: &mut NonceRequest,
     ) -> Option<BlockInfo> {
-        let Some(block) =
-            NakamotoBlock::consensus_deserialize(&mut nonce_request.message.as_slice()).ok()
+        let Some(block_proposal) =
+            BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()).ok()
         else {
-            // We currently reject anything that is not a block
+            // We currently reject anything that is not a valid block proposal
             warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",);
             return None;
         };
+        if block_proposal.reward_cycle != self.reward_cycle {
+            // We are not signing for this reward cycle. Reject the block
+            warn!(
+                "{self}: Received a nonce request for a different reward cycle. Reject it.";
+                "requested_reward_cycle" => block_proposal.reward_cycle,
+            );
+            return None;
+        }
+        // TODO: could add a check to ignore an old burn block height if we know it's outdated. Would require us to store the burn block height we last saw on the side.
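The same "outdated burn block height" TODO appears in both the block-proposal path and this nonce-request path. One hedged sketch of what "store the burn block height we last saw on the side" could look like; the type is hypothetical and nothing below is implemented by this patch:

```rust
/// Hypothetical side-state for the staleness check described in the TODO:
/// track the highest burn block height this signer has observed and treat
/// anything anchored below it as outdated.
#[derive(Default)]
struct LastSeenBurnHeight(u64);

impl LastSeenBurnHeight {
    /// Record a newly observed burn height, keeping the maximum.
    fn observe(&mut self, height: u64) {
        self.0 = self.0.max(height);
    }

    /// A request built on a lower burn height than the last one seen is stale.
    fn is_outdated(&self, request_burn_height: u64) -> bool {
        request_burn_height < self.0
    }
}
```

Wiring this in would mean calling `observe` from the `NewBurnBlock` handler and consulting `is_outdated` here before caching or validating the request.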
+ let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); let Some(mut block_info) = self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) .expect("Failed to connect to signer DB") else { debug!( - "{self}: We have received a block sign request for a block we have not seen before. Cache the nonce request and submit the block for validation..."; - "signer_sighash" => %block.header.signer_signature_hash(), + "{self}: received a nonce request for a new block. Submit block for validation. "; + "signer_sighash" => %signer_signature_hash, ); - let block_info = BlockInfo::new_with_request(block.clone(), nonce_request.clone()); + let block_info = BlockInfo::new_v1_with_request(block_proposal, nonce_request.clone()); stacks_client - .submit_block_for_validation_with_retry(block) + .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { warn!("{self}: Failed to submit block for validation: {e:?}",); }); @@ -780,7 +930,12 @@ impl Signer { if block_info.valid.is_none() { // We have not yet received validation from the stacks node. Cache the request and wait for validation debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation..."); - block_info.nonce_request = Some(nonce_request.clone()); + block_info + .ext + .set_nonce_request(nonce_request.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to set nonce_request: {e:?}",); + }); return Some(block_info); } @@ -794,10 +949,14 @@ impl Signer { stacks_client: &StacksClient, block: &NakamotoBlock, ) -> bool { - if self.approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set - // TODO: should be only allow special cased transactions during prepare phase before a key is set? - debug!("{self}: Already have an aggregate key. Skipping transaction verification..."); + let next_reward_cycle = self.reward_cycle.wrapping_add(1); + let approved_aggregate_public_key = stacks_client + .get_approved_aggregate_key(next_reward_cycle) + .unwrap_or(None); + if approved_aggregate_public_key.is_some() { + // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle + // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached. + debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle); return true; } if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) { @@ -825,7 +984,7 @@ impl Signer { ); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); @@ -841,10 +1000,10 @@ impl Signer { ); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}",); + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } false } @@ -856,8 +1015,8 @@ impl Signer { nonces: &std::collections::HashMap, ) -> Result, ClientError> { let transactions: Vec<_> = self - .stackerdb - .get_current_transactions_with_retry()? 
+ .stackerdb_manager + .get_current_transactions()? .into_iter() .filter_map(|tx| { if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { @@ -881,8 +1040,8 @@ impl Signer { // Get all the account nonces for the next signers let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); let transactions: Vec<_> = self - .stackerdb - .get_next_transactions_with_retry(&self.next_signer_slot_ids)?; + .stackerdb_manager + .get_next_transactions(&self.next_signer_slot_ids)?; let mut filtered_transactions = std::collections::HashMap::new(); NakamotoSigners::update_filtered_transactions( &mut filtered_transactions, @@ -939,8 +1098,8 @@ impl Signer { return None; }; self.signer_db - .insert_block(self.reward_cycle, &updated_block_info) - .expect(&format!("{self}: Failed to insert block in DB")); + .insert_block(&updated_block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); let process_request = updated_block_info.vote.is_some(); if !process_request { debug!("Failed to validate nonce request"); @@ -972,20 +1131,25 @@ impl Signer { // Signers only every trigger non-taproot signing rounds over blocks. Ignore SignTaproot results match operation_result { OperationResult::Sign(signature) => { - debug!("{self}: Received signature result"); + crate::monitoring::increment_operation_results("sign"); + info!("{self}: Received signature result"); self.process_signature(signature); } OperationResult::SignTaproot(_) => { + crate::monitoring::increment_operation_results("sign_taproot"); debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature."); } OperationResult::Dkg(aggregate_key) => { + crate::monitoring::increment_operation_results("dkg"); self.process_dkg(stacks_client, aggregate_key); } OperationResult::SignError(e) => { + crate::monitoring::increment_operation_results("sign_error"); warn!("{self}: Received a Sign error: {e:?}"); self.process_sign_error(e); } OperationResult::DkgError(e) => { + crate::monitoring::increment_operation_results("dkg_error"); warn!("{self}: Received a DKG error: {e:?}"); // TODO: process these errors and track malicious signers to report } @@ -996,6 +1160,10 @@ impl Signer { /// Process a dkg result by broadcasting a vote to the stacks node fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) { let mut dkg_results_bytes = vec![]; + debug!( + "{self}: Received DKG result. 
Broadcasting vote to the stacks node..."; + "dkg_public_key" => %dkg_public_key + ); if let Err(e) = SignerMessage::serialize_dkg_result( &mut dkg_results_bytes, dkg_public_key, @@ -1003,54 +1171,34 @@ impl Signer { ) { error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; "error" => %e); - } else { - if let Err(e) = self - .stackerdb - .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) - { - error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; + } else if let Err(e) = self + .stackerdb_manager + .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) + { + error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; "error" => %e); - } } - let epoch = retry_with_exponential_backoff(|| { - stacks_client - .get_node_epoch() - .map_err(backoff::Error::transient) - }) - .unwrap_or(StacksEpochId::Epoch24); - let tx_fee = if epoch < StacksEpochId::Epoch30 { - debug!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - Some(self.tx_fee_ustx) - } else { - None - }; // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance let signer_address = stacks_client.get_signer_address(); // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); - let signer_transactions = retry_with_exponential_backoff(|| { - self.get_signer_transactions(&account_nonces) - .map_err(backoff::Error::transient) - }) - .map_err(|e| { - warn!("{self}: Unable to get signer transactions: {e:?}"); - }) - .unwrap_or_default(); + let signer_transactions = self + .get_signer_transactions(&account_nonces) + .map_err(|e| { + error!("{self}: Unable to get signer transactions: {e:?}."); + }) + .unwrap_or_default(); // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce let next_nonce = signer_transactions .first() .map(|tx| tx.get_origin_nonce().wrapping_add(1)) .unwrap_or(*account_nonce); - match stacks_client.build_vote_for_aggregate_public_key( - self.stackerdb.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - *dkg_public_key, - self.reward_cycle, - tx_fee, - next_nonce, - ) { + let epoch = stacks_client + .get_node_epoch() + .unwrap_or(StacksEpochId::Epoch24); + match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) { Ok(new_transaction) => { if let Err(e) = self.broadcast_dkg_vote( stacks_client, @@ -1071,6 +1219,44 @@ impl Signer { } } + /// Build a signed DKG vote transaction + fn build_dkg_vote( + &mut self, + stacks_client: &StacksClient, + epoch: &StacksEpochId, + nonce: u64, + dkg_public_key: Point, + ) -> Result { + let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( + self.stackerdb_manager.get_signer_slot_id().0, + self.coordinator.current_dkg_id, + dkg_public_key, + self.reward_cycle, + nonce, + )?; + let tx_fee = if epoch < &StacksEpochId::Epoch30 { + info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); + let fee = if let Some(max_fee) = self.max_tx_fee_ustx { + let estimated_fee = stacks_client + .get_medium_estimated_fee_ustx(&unsigned_tx) + .map_err(|e| { 
+ warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); + e + }) + .unwrap_or(self.tx_fee_ustx); + std::cmp::min(estimated_fee, max_fee) + } else { + self.tx_fee_ustx + }; + debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); + fee + } else { + 0 + }; + unsigned_tx.set_tx_fee(tx_fee); + stacks_client.sign_transaction(unsigned_tx) + } + // Get the account nonces for the provided list of signer addresses fn get_account_nonces( &self, @@ -1108,7 +1294,7 @@ impl Signer { debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); } else if epoch == StacksEpochId::Epoch25 { debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); - stacks_client.submit_transaction_with_retry(&new_transaction)?; + stacks_client.submit_transaction(&new_transaction)?; info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); } else { debug!("{self}: Received a DKG result, but are in an unsupported epoch. Do not broadcast the transaction ({}).", new_transaction.txid()); @@ -1117,7 +1303,9 @@ impl Signer { // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe signer_transactions.push(new_transaction); let signer_message = SignerMessage::Transactions(signer_transactions); - self.stackerdb.send_message_with_retry(signer_message)?; + self.stackerdb_manager + .send_message_with_retry(signer_message)?; + crate::monitoring::increment_dkg_votes_submitted(); info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); Ok(()) } @@ -1133,9 +1321,11 @@ impl Signer { }; let block_submission = if block_vote.rejected { + crate::monitoring::increment_block_responses_sent(false); // We signed a rejection message. Return a rejection message BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) } else { + crate::monitoring::increment_block_responses_sent(true); // we agreed to sign the block hash. 
Return an approval message BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) }; @@ -1143,7 +1333,7 @@ impl Signer { // Submit signature result to miners to observe info!("{self}: Submit block response: {block_submission}"); if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_submission.into()) { warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); @@ -1183,33 +1373,75 @@ impl Signer { debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); } } - /// Persist state needed to ensure the signer can continue to perform - /// DKG and participate in signing rounds accross crashes - /// - /// # Panics - /// Panics if the insertion fails - fn save_signer_state(&self) { + /// Persist signer state in both SignerDB and StackerDB + fn save_signer_state(&mut self) -> Result<(), PersistenceError> { + let rng = &mut OsRng; + let state = self.state_machine.signer.save(); + let serialized_state = serde_json::to_vec(&state)?; + + let encrypted_state = encrypt( + &self.state_machine.network_private_key, + &serialized_state, + rng, + )?; + + let signerdb_result = self.save_signer_state_in_signerdb(&encrypted_state); + let stackerdb_result = self.save_signer_state_in_stackerdb(encrypted_state); + + if let Err(err) = &signerdb_result { + warn!("{self}: Failed to persist state in SignerDB: {err}"); + } + + if let Err(err) = &stackerdb_result { + warn!("{self}: Failed to persist state in StackerDB: {err}"); + + stackerdb_result + } else { + signerdb_result + } + } + + /// Persist signer state in SignerDB + fn save_signer_state_in_signerdb( + &self, + encrypted_state: &[u8], + ) -> Result<(), PersistenceError> { self.signer_db - .insert_signer_state(self.reward_cycle, &state) - .expect("Failed to persist signer state"); + .insert_encrypted_signer_state(self.reward_cycle, encrypted_state)?; + Ok(()) + } + + /// Persist signer state in StackerDB + /// TODO: this is a no-op until the number of signer slots can be expanded + fn save_signer_state_in_stackerdb( + &mut self, + _encrypted_state: Vec, + ) -> Result<(), PersistenceError> { + /* + * This is a no-op until the number of signer slots can be expanded to 14 + * + let message = SignerMessage::EncryptedSignerState(encrypted_state); + self.stackerdb_manager.send_message_with_retry(message)?; + */ + Ok(()) } /// Send any operation results across the provided channel fn send_operation_results( &mut self, - res: Sender>, + res: Sender>, operation_results: Vec, ) { let nmb_results = operation_results.len(); - match res.send(operation_results) { + match res.send(operation_results.into_iter().map(|r| r.into()).collect()) { Ok(_) => { debug!("{self}: Successfully sent {nmb_results} operation result(s)") } @@ -1226,7 +1458,7 @@ impl Signer { outbound_messages.len() ); for msg in outbound_messages { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); if let Ok(ack) = ack { debug!("{self}: send outbound ACK: {ack:?}"); } else { @@ -1235,39 +1467,93 @@ impl Signer { } } - /// Update the DKG for the provided signer info, triggering it if required - pub fn update_dkg( + /// Refresh DKG and queue it if required + pub fn refresh_dkg( &mut self, stacks_client: &StacksClient, + 
res: Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { - let reward_cycle = self.reward_cycle; + // First attempt to retrieve the aggregate key from the contract. + self.update_approved_aggregate_key(stacks_client)?; + if self.approved_aggregate_public_key.is_some() { + return Ok(()); + } + // Check stackerdb for any missed DKG messages to catch up our state. + self.read_dkg_stackerdb_messages(stacks_client, res, current_reward_cycle)?; + // Check if we should still queue DKG + if !self.should_queue_dkg(stacks_client)? { + return Ok(()); + } + // Because there could be a slight delay in reading pending transactions and a key being approved by the contract, + // check one last time if the approved key was set since we finished the should queue dkg call + self.update_approved_aggregate_key(stacks_client)?; + if self.approved_aggregate_public_key.is_some() { + return Ok(()); + } + if self.commands.front() != Some(&SignerCommand::Dkg) { + info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); + self.commands.push_front(SignerCommand::Dkg); + } else { + debug!("{self}: DKG command already queued..."); + } + Ok(()) + } + + /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly + pub fn update_approved_aggregate_key( + &mut self, + stacks_client: &StacksClient, + ) -> Result<(), ClientError> { let old_dkg = self.approved_aggregate_public_key; self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(reward_cycle)?; + stacks_client.get_approved_aggregate_key(self.reward_cycle)?; if self.approved_aggregate_public_key.is_some() { // TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key. // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. + let internal_dkg = self.coordinator.aggregate_public_key; + if internal_dkg != self.approved_aggregate_public_key { + warn!("{self}: we do not support changing the internal DKG key yet. Expected {internal_dkg:?} got {:?}", self.approved_aggregate_public_key); + } self.coordinator .set_aggregate_public_key(self.approved_aggregate_public_key); if old_dkg != self.approved_aggregate_public_key { - debug!( - "{self}: updated DKG value to {:?}.", + warn!( + "{self}: updated DKG value from {old_dkg:?} to {:?}.", self.approved_aggregate_public_key ); } - return Ok(()); - }; + match self.state { + State::OperationInProgress(Operation::Dkg) => { + debug!( + "{self}: DKG has already been set. Aborting DKG operation {}.", + self.coordinator.current_dkg_id + ); + self.finish_operation(); + } + State::Uninitialized => { + // If we successfully load the DKG value, we are fully initialized + self.state = State::Idle; + } + _ => { + // do nothing + } + } + } + Ok(()) + } + + /// Should DKG be queued to the current signer's command queue + /// This assumes that no key has been approved by the contract yet + pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { if self.state != State::Idle - || Some(self.signer_id) != self.get_coordinator(current_reward_cycle).0 + || self.signer_id != self.get_coordinator_dkg().0 + || self.commands.front() == Some(&SignerCommand::Dkg) { - // We are not the coordinator or we are in the middle of an operation. 
Do not attempt to queue DKG - return Ok(()); + // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG + return Ok(false); } - debug!("{self}: Checking if old DKG vote transaction exists in StackerDB..."); - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes let signer_address = stacks_client.get_signer_address(); let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { @@ -1279,20 +1565,19 @@ impl Signer { NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: {self}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}")); if Some(params.aggregate_key) == self.coordinator.aggregate_public_key && params.voting_round == self.coordinator.current_dkg_id - && reward_cycle == self.reward_cycle { debug!("{self}: Not triggering a DKG round. Already have a pending vote transaction."; "txid" => %transaction.txid(), "aggregate_key" => %params.aggregate_key, "voting_round" => params.voting_round ); - return Ok(()); + return Ok(false); } } if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( self.coordinator.current_dkg_id, self.reward_cycle, - *stacks_client.get_signer_address(), + *signer_address, )? { let Some(round_weight) = stacks_client .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? @@ -1302,7 +1587,7 @@ impl Signer { "voting_round" => self.coordinator.current_dkg_id, "aggregate_key" => %aggregate_key ); - return Ok(()); + return Ok(false); }; let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; if round_weight < threshold_weight { @@ -1315,87 +1600,164 @@ impl Signer { "round_weight" => round_weight, "threshold_weight" => threshold_weight ); - return Ok(()); + return Ok(false); } - debug!("{self}: Vote for DKG failed. Triggering a DKG round."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - } else { - debug!("{self}: Triggering a DKG round."); - } - if self.commands.front() != Some(&Command::Dkg) { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(Command::Dkg); - } else { - debug!("{self}: DKG command already queued..."); - } - Ok(()) - } - - /// Process the event - pub fn process_event( - &mut self, - stacks_client: &StacksClient, - event: Option<&SignerEvent>, - res: Sender<Vec<OperationResult>>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - debug!("{self}: Processing event: {event:?}"); - match event { - Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { - debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) - } - Some(SignerEvent::SignerMessages(signer_set, messages)) => { - if *signer_set != self.stackerdb.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. 
Ignoring..."); - return Ok(()); + // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction + // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes + let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); + let old_transactions = self.stackerdb_manager.get_current_transactions()?; + // Check if we have an existing vote transaction for the same round and reward cycle + for transaction in old_transactions.iter() { + // We should not consider other signer transactions and should ignore invalid transaction versions + if transaction.origin_address() != *signer_address + || transaction.is_mainnet() != self.mainnet + { + continue; } - debug!( - "{self}: Received {} messages from the other signers...", - messages.len() - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - Some(SignerEvent::MinerMessages(blocks, messages, miner_key)) => { - if let Some(miner_key) = miner_key { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); + let Some(params) = + NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) + else { + continue; }; - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); - return Ok(()); + let Some(dkg_public_key) = self.coordinator.aggregate_public_key else { + break; + }; + if params.aggregate_key == dkg_public_key + && params.voting_round == self.coordinator.current_dkg_id + && params.reward_cycle == self.reward_cycle + { + let origin_nonce = transaction.get_origin_nonce(); + if origin_nonce < account_nonce { + // We have already voted, but our vote nonce is outdated. Resubmit vote with updated transaction + warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); + self.process_dkg(stacks_client, &dkg_public_key); + } else { + debug!("{self}: Already have a pending DKG vote in StackerDB. Waiting for it to be confirmed."; + "txid" => %transaction.txid(), + "aggregate_key" => %params.aggregate_key, + "voting_round" => params.voting_round, + "reward_cycle" => params.reward_cycle, + "nonce" => origin_nonce + ); + } + return Ok(false); } - debug!( - "{self}: Received {} block proposals and {} messages from the miner", - blocks.len(), - messages.len(); - "miner_key" => ?miner_key, - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - self.handle_proposed_blocks(stacks_client, blocks); - } - Some(SignerEvent::StatusCheck) => { - debug!("{self}: Received a status check event.") - } - Some(SignerEvent::NewBurnBlock(height)) => { - debug!("{self}: Receved a new burn block event for block height {height}") - } - None => { - // No event. Do nothing. - debug!("{self}: No event received") } } - Ok(()) + Ok(true) + } +} + +fn load_encrypted_signer_state( + storage: S, + id: S::IdType, + private_key: &Scalar, +) -> Result, PersistenceError> { + if let Some(encrypted_state) = storage.get_encrypted_signer_state(id)? 
{ + let serialized_state = decrypt(private_key, &encrypted_state)?; + let state = serde_json::from_slice(&serialized_state) + .expect("Failed to deserialize decrypted state"); + Ok(Some(v2::Signer::load(&state))) + } else { + Ok(None) + } +} + +trait SignerStateStorage { + type IdType; + + fn get_encrypted_signer_state( + self, + signer_config: Self::IdType, + ) -> Result<Option<Vec<u8>>, PersistenceError>; +} + +impl SignerStateStorage for &mut StackerDBManager { + type IdType = SignerSlotID; + + fn get_encrypted_signer_state( + self, + id: Self::IdType, + ) -> Result<Option<Vec<u8>>, PersistenceError> { + Ok(self.get_encrypted_signer_state(id)?) + } +} + +impl SignerStateStorage for &SignerDb { + type IdType = u64; + fn get_encrypted_signer_state( + self, + id: Self::IdType, + ) -> Result<Option<Vec<u8>>, PersistenceError> { + Ok(self.get_encrypted_signer_state(id)?) + } +} + +fn encrypt( + private_key: &Scalar, + msg: &[u8], + rng: &mut impl rand_core::CryptoRngCore, +) -> Result<Vec<u8>, EncryptionError> { + wsts::util::encrypt(derive_encryption_key(private_key).as_bytes(), msg, rng) + .map_err(|_| EncryptionError::Encrypt) +} + +fn decrypt(private_key: &Scalar, encrypted_msg: &[u8]) -> Result<Vec<u8>, EncryptionError> { + wsts::util::decrypt(derive_encryption_key(private_key).as_bytes(), encrypted_msg) + .map_err(|_| EncryptionError::Decrypt) +} + +fn derive_encryption_key(private_key: &Scalar) -> Sha512Trunc256Sum { + let mut prefixed_key = "SIGNER_STATE_ENCRYPTION_KEY/".as_bytes().to_vec(); + prefixed_key.extend_from_slice(&private_key.to_bytes()); + + Sha512Trunc256Sum::from_data(&prefixed_key) +} + +/// Error stemming from a persistence operation +#[derive(Debug, thiserror::Error)] +pub enum PersistenceError { + /// Encryption error + #[error("{0}")] + Encryption(#[from] EncryptionError), + /// Database error + #[error("Database operation failed: {0}")] + DBError(#[from] DBError), + /// Serialization error + #[error("JSON serialization failed: {0}")] + JsonSerializationError(#[from] serde_json::Error), + /// StackerDB client error + #[error("StackerDB client error: {0}")] + StackerDBClientError(#[from] ClientError), +} + +/// Error stemming from an encryption or decryption operation +#[derive(Debug, thiserror::Error)] +pub enum EncryptionError { + /// Encryption failed + #[error("Encryption operation failed")] + Encrypt, + /// Decryption failed + #[error("Decryption operation failed")] + Decrypt, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encrypted_messages_should_be_possible_to_decrypt() { + let msg = "Nobody's gonna know".as_bytes(); + let key = Scalar::random(&mut OsRng); + + let encrypted = encrypt(&key, msg, &mut OsRng).unwrap(); + + assert_ne!(encrypted, msg); + + let decrypted = decrypt(&key, &encrypted).unwrap(); + + assert_eq!(decrypted, msg); } } diff --git a/stacks-signer/src/v1/stackerdb_manager.rs b/stacks-signer/src/v1/stackerdb_manager.rs new file mode 100644 index 00000000000..cf5e4840225 --- /dev/null +++ b/stacks-signer/src/v1/stackerdb_manager.rs @@ -0,0 +1,326 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. +// +use blockstack_lib::chainstate::stacks::StacksTransaction; +use clarity::types::chainstate::StacksPrivateKey; +use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::{SignerSession, StackerDBSession}; +use libstackerdb::StackerDBChunkAckData; +use slog::{slog_debug, slog_error, slog_warn}; +use stacks_common::codec::read_next; +use stacks_common::{debug, error, warn}; +use wsts::net::Packet; + +use crate::client::stackerdb::StackerDB; +use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; +use crate::config::SignerConfig; + +/// The session manager for communicating with the .signers contracts for the current and next reward cycle +#[derive(Debug)] +pub struct StackerDBManager { + /// The stacker-db transaction msg session for the NEXT reward cycle + next_transaction_session: StackerDBSession, + /// The stacker-db sessions for each signer set and message type. + stackerdb: StackerDB, +} + +impl From<&SignerConfig> for StackerDBManager { + fn from(config: &SignerConfig) -> Self { + let stackerdb = StackerDB::from(config); + let next_transaction_session = StackerDBSession::new( + &config.node_host, + MessageSlotID::Transactions + .stacker_db_contract(config.mainnet, config.reward_cycle.wrapping_add(1)), + ); + Self { + next_transaction_session, + stackerdb, + } + } +} +impl StackerDBManager { + /// Create a new StackerDB Manager + pub fn new( + host: &str, + stacks_private_key: StacksPrivateKey, + is_mainnet: bool, + reward_cycle: u64, + signer_slot_id: SignerSlotID, + ) -> Self { + let stackerdb = StackerDB::new( + host, + stacks_private_key, + is_mainnet, + reward_cycle, + signer_slot_id, + ); + let next_transaction_session = StackerDBSession::new( + host, + MessageSlotID::Transactions + .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), + ); + Self { + next_transaction_session, + stackerdb, + } + } + + /// Send a message to the stackerdb with retry + pub fn send_message_with_retry( + &mut self, + message: SignerMessage, + ) -> Result<StackerDBChunkAckData, ClientError> { + self.stackerdb.send_message_with_retry(message) + } + + /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an + /// exponential backoff retry + pub fn send_message_bytes_with_retry( + &mut self, + msg_id: &MessageSlotID, + message_bytes: Vec<u8>, + ) -> Result<StackerDBChunkAckData, ClientError> { + self.stackerdb + .send_message_bytes_with_retry(msg_id, message_bytes) + } + + /// Get the ordered DKG packets from stackerdb for the signer slot IDs. 
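+ /// (Descriptive note, grounded in the loop below: packets are gathered slot type by slot type in
+ /// DKG protocol order -- DkgBegin, DkgPublicShares, DkgPrivateBegin, DkgPrivateShares, DkgEndBegin,
+ /// DkgEnd -- so the returned vector is grouped by message type rather than interleaved per signer.)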
+ pub fn get_dkg_packets( + &mut self, + signer_ids: &[SignerSlotID], + ) -> Result<Vec<Packet>, ClientError> { + let packet_slots = &[ + MessageSlotID::DkgBegin, + MessageSlotID::DkgPublicShares, + MessageSlotID::DkgPrivateBegin, + MessageSlotID::DkgPrivateShares, + MessageSlotID::DkgEndBegin, + MessageSlotID::DkgEnd, + ]; + let slot_ids = signer_ids.iter().map(|id| id.0).collect::<Vec<_>>(); + let mut packets = vec![]; + for packet_slot in packet_slots { + let session = self + .stackerdb + .get_session_mut(packet_slot) + .ok_or(ClientError::NotConnected)?; + let messages = StackerDB::get_messages(session, &slot_ids)?; + for message in messages { + let SignerMessage::Packet(packet) = message else { + warn!("Found an unexpected type in a packet slot {packet_slot}"); + continue; + }; + packets.push(packet); + } + } + Ok(packets) + } + + /// Get the transactions from stackerdb for the signers + fn get_transactions( + transactions_session: &mut StackerDBSession, + signer_ids: &[SignerSlotID], + ) -> Result<Vec<StacksTransaction>, ClientError> { + let slot_ids = signer_ids.iter().map(|id| id.0).collect::<Vec<_>>(); + let messages = StackerDB::get_messages(transactions_session, &slot_ids)?; + let mut transactions = vec![]; + for message in messages { + let SignerMessage::Transactions(chunk_transactions) = message else { + warn!("Signer wrote an unexpected type to the transactions slot"); + continue; + }; + transactions.extend(chunk_transactions); + } + Ok(transactions) + } + + /// Get this signer's latest transactions from stackerdb + pub fn get_current_transactions(&mut self) -> Result<Vec<StacksTransaction>, ClientError> { + let signer_slot_id = self.get_signer_slot_id(); + let Some(transactions_session) = + self.stackerdb.get_session_mut(&MessageSlotID::Transactions) + else { + return Err(ClientError::NotConnected); + }; + Self::get_transactions(transactions_session, &[signer_slot_id]) + } + + /// Get the latest signer transactions from signer ids for the next reward cycle + pub fn get_next_transactions( + &mut self, + signer_ids: &[SignerSlotID], + ) -> Result<Vec<StacksTransaction>, ClientError> { + debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); + Self::get_transactions(&mut self.next_transaction_session, signer_ids) + } + + /// Get the encrypted state for the given signer + pub fn get_encrypted_signer_state( + &mut self, + signer_id: SignerSlotID, + ) -> Result<Option<Vec<u8>>, ClientError> { + debug!("Getting the persisted encrypted state for signer {signer_id}"); + let Some(state_session) = self + .stackerdb + .get_session_mut(&MessageSlotID::EncryptedSignerState) + else { + return Err(ClientError::NotConnected); + }; + + let send_request = || { + state_session + .get_latest_chunks(&[signer_id.0]) + .map_err(backoff::Error::transient) + }; + + let Some(chunk) = retry_with_exponential_backoff(send_request)?.pop().ok_or( + ClientError::UnexpectedResponseFormat(format!( + "Missing response for state session request for signer {}", + signer_id + )), + )? + else { + debug!("No persisted state for signer {signer_id}"); + return Ok(None); + }; + + if chunk.is_empty() { + debug!("Empty persisted state for signer {signer_id}"); + return Ok(None); + } + + let SignerMessage::EncryptedSignerState(state) = + read_next::<SignerMessage, _>(&mut chunk.as_slice())? 
+ else { + error!("Wrong message type stored in signer state slot for signer {signer_id}"); + return Ok(None); + }; + + Ok(Some(state)) + } + + /// Retrieve the signer set this stackerdb client is attached to + pub fn get_signer_set(&self) -> u32 { + self.stackerdb.get_signer_set() + } + + /// Retrieve the signer slot ID + pub fn get_signer_slot_id(&self) -> SignerSlotID { + self.stackerdb.get_signer_slot_id() + } +} + +#[cfg(test)] +mod tests { + use std::thread::spawn; + use std::time::Duration; + + use blockstack_lib::chainstate::stacks::{ + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionVersion, + }; + use blockstack_lib::util_lib::strings::StacksString; + use clarity::codec::StacksMessageCodec; + use clarity::types::chainstate::StacksPrivateKey; + use libstackerdb::StackerDBChunkAckData; + + use super::*; + use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; + use crate::config::GlobalConfig; + + #[test] + fn get_signer_transactions_should_succeed() { + let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); + let signer_config = generate_signer_config(&config, 5, 20); + let mut manager = StackerDBManager::from(&signer_config); + let sk = StacksPrivateKey::new(); + let tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0, + auth: TransactionAuth::from_p2pkh(&sk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::SmartContract( + TransactionSmartContract { + name: "test-contract".into(), + code_body: StacksString::from_str("(/ 1 0)").unwrap(), + }, + None, + ), + }; + + let signer_message = SignerMessage::Transactions(vec![tx.clone()]); + let message = signer_message.serialize_to_vec(); + + let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; + let h = spawn(move || manager.get_next_transactions(&signer_slot_ids)); + let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); + response_bytes.extend(message); + let mock_server = mock_server_from_config(&config); + write_response(mock_server, response_bytes.as_slice()); + + let signer_message = SignerMessage::Transactions(vec![]); + let message = signer_message.serialize_to_vec(); + let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); + response_bytes.extend(message); + let mock_server = mock_server_from_config(&config); + write_response(mock_server, response_bytes.as_slice()); + + let transactions = h.join().unwrap().unwrap(); + assert_eq!(transactions, vec![tx]); + } + + #[test] + fn send_signer_message_should_succeed() { + let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); + let signer_config = generate_signer_config(&config, 5, 20); + let mut stackerdb = StackerDBManager::from(&signer_config); + + let sk = StacksPrivateKey::new(); + let tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0, + auth: TransactionAuth::from_p2pkh(&sk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::SmartContract( + TransactionSmartContract { + name: "test-contract".into(), + code_body: StacksString::from_str("(/ 1 0)").unwrap(), + }, + None, + ), + }; + + let signer_message = SignerMessage::Transactions(vec![tx]); + let ack = StackerDBChunkAckData { + accepted: true, + 
reason: None, + metadata: None, + code: None, + }; + let mock_server = mock_server_from_config(&config); + let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); + let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); + let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); + response_bytes.extend(payload.as_bytes()); + std::thread::sleep(Duration::from_millis(500)); + write_response(mock_server, response_bytes.as_slice()); + assert_eq!(ack, h.join().unwrap().unwrap()); + } +} diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index be753371153..b7967fe2491 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -58,6 +58,7 @@ libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -79,10 +80,6 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.24.3" features = ["serde", "recovery"] -[dependencies.rusqlite] -version = "=0.24.2" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.ed25519-dalek] workspace = true diff --git a/stackslib/src/burnchains/affirmation.rs b/stackslib/src/burnchains/affirmation.rs index 1f43a7cd7a1..fc7398c9ff5 100644 --- a/stackslib/src/burnchains/affirmation.rs +++ b/stackslib/src/burnchains/affirmation.rs @@ -974,7 +974,8 @@ pub fn find_heaviest_block_commit( // found debug!( "PoX anchor block-commit {},{},{} has {} burnt, {} confs", - &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs + &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs; + "stacks_block_hash" => opdata.block_header_hash ); // sanity check -- there should be exactly as many confirmations on the suspected @@ -996,7 +997,9 @@ pub fn find_heaviest_block_commit( if *op_ancestor_height == ancestor_block && *op_ancestor_vtxindex == ancestor_vtxindex { - debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex); + debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(true); if !found_conf { conf_count += 1; @@ -1004,11 +1007,15 @@ pub fn find_heaviest_block_commit( } burn_count += opdata.burn_fee; } else { - debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(false); } } else { - debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(false); } } diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index a5627db4df9..82cbb7b7f66 100644 --- 
a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -20,7 +20,7 @@ use std::ops::Deref; use std::{cmp, fs}; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::deps_common::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; use stacks_common::deps_common::bitcoin::blockdata::constants::genesis_block; use stacks_common::deps_common::bitcoin::network::constants::Network; @@ -31,6 +31,7 @@ use stacks_common::deps_common::bitcoin::network::serialize::{ }; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::uint::Uint256; use stacks_common::util::{get_epoch_time_secs, log}; @@ -423,7 +424,7 @@ impl SpvClient { } let tx = self.tx_begin()?; - let args: &[&dyn ToSql] = &[&u64_to_sql(interval)?, &work.to_hex_be()]; + let args = params![u64_to_sql(interval)?, work.to_hex_be()]; tx.execute( "INSERT OR REPLACE INTO chain_work (interval,work) VALUES (?1,?2)", args, @@ -706,7 +707,7 @@ impl SpvClient { let mut headers = vec![]; let sql_query = "SELECT * FROM headers WHERE height >= ?1 AND height < ?2 ORDER BY height"; - let sql_args: &[&dyn ToSql] = &[&u64_to_sql(start_block)?, &u64_to_sql(end_block)?]; + let sql_args = params![u64_to_sql(start_block)?, u64_to_sql(end_block)?]; let mut stmt = self .headers_db @@ -748,15 +749,15 @@ impl SpvClient { let sql = "INSERT OR REPLACE INTO headers (version, prev_blockhash, merkle_root, time, bits, nonce, height, hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; - let args: &[&dyn ToSql] = &[ - &header.version, - &header.prev_blockhash, - &header.merkle_root, - &header.time, - &header.bits, - &header.nonce, - &u64_to_sql(height)?, - &BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()), + let args = params![ + header.version, + header.prev_blockhash, + header.merkle_root, + header.time, + header.bits, + header.nonce, + u64_to_sql(height)?, + BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()), ]; tx.execute(sql, args) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 51084b56538..4002c253aed 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -62,8 +62,8 @@ use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::{POX_2_MAINNET_CODE, POX_2_TESTNET_CODE}; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{ - StacksEpoch, StacksEpochId, MINING_COMMITMENT_WINDOW, NETWORK_ID_MAINNET, NETWORK_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_2_0_LAST_BLOCK_TO_PROCESS, + StacksEpoch, StacksEpochId, NETWORK_ID_MAINNET, NETWORK_ID_TESTNET, PEER_VERSION_MAINNET, + PEER_VERSION_TESTNET, STACKS_2_0_LAST_BLOCK_TO_PROCESS, }; use crate::deps; use crate::monitoring::update_burnchain_height; @@ -90,6 +90,59 @@ impl BurnchainStateTransition { burn_dist: vec![], accepted_ops: vec![], consumed_leader_keys: vec![], + windowed_block_commits: vec![], + windowed_missed_commits: vec![], } } + + /// Get the transaction IDs of all accepted burnchain operations in this block + pub fn txids(&self) -> Vec<Txid> { 
self.accepted_ops.iter().map(|ref op| op.txid()).collect() + } + + /// Get the sum of all burnchain tokens spent in this burnchain block's accepted operations + /// (i.e. applies to block commits). + /// Returns None on overflow. + pub fn total_burns(&self) -> Option<u64> { + self.accepted_ops.iter().try_fold(0u64, |acc, op| { + let bf = match op { + BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, + _ => 0, + }; + acc.checked_add(bf) + }) + } + + /// Get the median block burn from the window. If the window length is even, then the average + /// of the two middle-most values will be returned. + pub fn windowed_median_burns(&self) -> Option<u64> { + let block_total_burn_opts = self.windowed_block_commits.iter().map(|block_commits| { + block_commits + .iter() + .try_fold(0u64, |acc, op| acc.checked_add(op.burn_fee)) + }); + + let mut block_total_burns = vec![]; + for burn_opt in block_total_burn_opts.into_iter() { + block_total_burns.push(burn_opt?); + } + + block_total_burns.sort(); + + if block_total_burns.len() == 0 { + return Some(0); + } else if block_total_burns.len() == 1 { + return Some(block_total_burns[0]); + } else if block_total_burns.len() % 2 != 0 { + let idx = block_total_burns.len() / 2; + return block_total_burns.get(idx).map(|b| *b); + } else { + // NOTE: the `- 1` is safe because block_total_burns.len() >= 2 + let idx_left = block_total_burns.len() / 2 - 1; + let idx_right = block_total_burns.len() / 2; + let burn_left = block_total_burns.get(idx_left)?; + let burn_right = block_total_burns.get(idx_right)?; + return Some((burn_left + burn_right) / 2); } } @@ -158,10 +211,26 @@ impl BurnchainStateTransition { }) .epoch_id; + // what was the epoch at the start of this window? + let window_start_epoch_id = SortitionDB::get_stacks_epoch( + sort_tx, + parent_snapshot + .block_height + .saturating_sub(epoch_id.mining_commitment_window().into()), + )? + .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined at burn height {}", + parent_snapshot.block_height - u64::from(epoch_id.mining_commitment_window()) + ) + }) + .epoch_id; + if !burnchain.is_in_prepare_phase(parent_snapshot.block_height + 1) && !burnchain .pox_constants .is_after_pox_sunset_end(parent_snapshot.block_height + 1, epoch_id) + && (epoch_id < StacksEpochId::Epoch30 || window_start_epoch_id == epoch_id) { // PoX reward-phase is active! // build a map of intended sortition -> missed commit for the missed commits @@ -177,11 +246,14 @@ impl BurnchainStateTransition { } } - for blocks_back in 0..(MINING_COMMITMENT_WINDOW - 1) { + for blocks_back in 0..(epoch_id.mining_commitment_window() - 1) { if parent_snapshot.block_height < (blocks_back as u64) { debug!("Mining commitment window shortened because block height is less than window size"; - "block_height" => %parent_snapshot.block_height, - "window_size" => %MINING_COMMITMENT_WINDOW); + "block_height" => %parent_snapshot.block_height, + "window_size" => %epoch_id.mining_commitment_window(), + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash + ); break; } let block_height = parent_snapshot.block_height - (blocks_back as u64); @@ -202,11 +274,21 @@ impl BurnchainStateTransition { windowed_missed_commits.push(missed_commits_at_height); } + test_debug!( + "Block {} is in a reward phase with PoX. 
Miner commit window is {}: {:?}", + parent_snapshot.block_height + 1, + windowed_block_commits.len(), + &windowed_block_commits; + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash + ); } else { - // PoX reward-phase is not active + // PoX reward-phase is not active, or we're starting a new epoch debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", - parent_snapshot.block_height + 1 + "Block {} is in a prepare phase, in the post-PoX sunset, or in an epoch transition, so no windowing will take place", + parent_snapshot.block_height + 1; + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash ); assert_eq!(windowed_block_commits.len(), 1); @@ -244,8 +326,9 @@ impl BurnchainStateTransition { // calculate the burn distribution from these operations. // The resulting distribution will contain the user burns that match block commits let burn_dist = BurnSamplePoint::make_min_median_distribution( - windowed_block_commits, - windowed_missed_commits, + epoch_id.mining_commitment_window(), + windowed_block_commits.clone(), + windowed_missed_commits.clone(), burn_blocks, ); BurnSamplePoint::prometheus_update_miner_commitments(&burn_dist); @@ -266,7 +349,8 @@ impl BurnchainStateTransition { for op in all_block_commits.values() { warn!( "REJECTED({}) block commit {} at {},{}: Committed to an already-consumed VRF key", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "stacks_block_hash" => %op.block_header_hash ); } @@ -276,6 +360,8 @@ impl BurnchainStateTransition { burn_dist, accepted_ops, consumed_leader_keys, + windowed_block_commits, + windowed_missed_commits, }) } } @@ -473,16 +559,18 @@ impl Burnchain { .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - pub fn next_reward_cycle(&self, block_height: u64) -> Option<u64> { + /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. + /// The reward set is calculated at reward cycle index 1, so if this block height is at or after + /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, + /// if its reward cycle index is 0, then it belongs to the previous reward cycle. 
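+ /// Illustrative example (hypothetical values, assuming `block_height_to_reward_cycle()` computes
+ /// floor((block_height - first_block_height) / reward_cycle_length)): with first_block_height = 0
+ /// and reward_cycle_length = 5, burn height 10 sits at reward-cycle index 0, so this returns
+ /// Some(1) (the previous cycle), while burn height 11 sits at index 1 and returns Some(2).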
+ pub fn pox_reward_cycle(&self, block_height: u64) -> Option<u64> { let cycle = self.block_height_to_reward_cycle(block_height)?; let effective_height = block_height.checked_sub(self.first_block_height)?; - let next_bump = if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 - { - 0 + if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { + Some(cycle.saturating_sub(1)) } else { - 1 - }; - Some(cycle + next_bump) + Some(cycle) + } } pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option<u64> { @@ -921,7 +1009,8 @@ impl Burnchain { // duplicate warn!( "REJECTED({}) leader key register {} at {},{}: Duplicate VRF key", - data.block_height, &data.txid, data.block_height, data.vtxindex + data.block_height, &data.txid, data.block_height, data.vtxindex; + "consensus_hash" => %data.consensus_hash ); false } else { @@ -991,7 +1080,7 @@ impl Burnchain { "prev_reward_cycle" => %prev_reward_cycle, "this_reward_cycle" => %this_reward_cycle, "block_height" => %block_height, - "cycle-length" => %burnchain.pox_constants.reward_cycle_length + "cycle_length" => %burnchain.pox_constants.reward_cycle_length, ); update_pox_affirmation_maps(burnchain_db, indexer, prev_reward_cycle, burnchain)?; } @@ -1000,7 +1089,7 @@ /// Hand off the block to the ChainsCoordinator _and_ process the sortition /// *only* to be used by legacy stacks node interfaces, like the Helium node - pub fn process_block_and_sortition_deprecated( + fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, @@ -1232,7 +1321,8 @@ "Parsed block {} (epoch {}) in {}ms", burnchain_block.block_height(), cur_epoch.epoch_id, - parse_end.saturating_sub(parse_start) + parse_end.saturating_sub(parse_start); + "burn_block_hash" => %burnchain_block.block_hash() ); db_send @@ -1270,7 +1360,8 @@ debug!( "Inserted block {} in {}ms", burnchain_block.block_height(), - insert_end.saturating_sub(insert_start) + insert_end.saturating_sub(insert_start); + "burn_block_hash" => %burnchain_block.block_hash() ); } Ok(last_processed) @@ -1567,7 +1658,8 @@ "Parsed block {} (in epoch {}) in {}ms", burnchain_block.block_height(), cur_epoch.epoch_id, - parse_end.saturating_sub(parse_start) + parse_end.saturating_sub(parse_start); + "burn_block_hash" => %burnchain_block.block_hash() ); db_send @@ -1619,7 +1711,8 @@ debug!( "Inserted block {} in {}ms", burnchain_block.block_height(), - insert_end.saturating_sub(insert_start) + insert_end.saturating_sub(insert_start); + "burn_block_hash" => %burnchain_block.block_hash() ); } Ok(last_processed) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index f7da4a0ae99..384047ccd44 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -19,9 +19,10 @@ use std::path::Path; use std::{cmp, fmt, fs, io}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use serde_json; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::sqlite::NO_PARAMS; use crate::burnchains::affirmation::*; use crate::burnchains::{ @@ -321,12 +322,12 @@ impl<'a> BurnchainDBTransaction<'a> { let sql = "INSERT OR IGNORE INTO burnchain_db_block_headers (block_height, block_hash, parent_block_hash, num_txs, timestamp) VALUES (?, ?, ?, ?, 
?)"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(header.block_height)?, - &header.block_hash, - &header.parent_block_hash, - &u64_to_sql(header.num_txs)?, - &u64_to_sql(header.timestamp)?, + let args = params![ + u64_to_sql(header.block_height)?, + header.block_hash, + header.parent_block_hash, + u64_to_sql(header.num_txs)?, + u64_to_sql(header.timestamp)?, ]; let affected_rows = self.sql_tx.execute(sql, args)?; if affected_rows == 0 { @@ -346,7 +347,7 @@ impl<'a> BurnchainDBTransaction<'a> { ) -> Result { let weight = affirmation_map.weight(); let sql = "INSERT INTO affirmation_maps (affirmation_map,weight) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = &[&affirmation_map.encode(), &u64_to_sql(weight)?]; + let args = params![affirmation_map.encode(), u64_to_sql(weight)?]; match self.sql_tx.execute(sql, args) { Ok(_) => { let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &affirmation_map)? @@ -367,11 +368,11 @@ impl<'a> BurnchainDBTransaction<'a> { affirmation_id: u64, ) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET affirmation_id = ?1, anchor_block_descendant = ?2 WHERE burn_block_hash = ?3 AND txid = ?4"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(affirmation_id)?, - &opt_u64_to_sql(anchor_block_descendant)?, - &block_commit.burn_header_hash, - &block_commit.txid, + let args = params![ + u64_to_sql(affirmation_id)?, + opt_u64_to_sql(anchor_block_descendant)?, + block_commit.burn_header_hash, + block_commit.txid, ]; match self.sql_tx.execute(sql, args) { Ok(_) => { @@ -390,26 +391,26 @@ impl<'a> BurnchainDBTransaction<'a> { target_reward_cycle: u64, ) -> Result<(), DBError> { let sql = "INSERT OR REPLACE INTO anchor_blocks (reward_cycle) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(target_reward_cycle)?]; + let args = params![u64_to_sql(target_reward_cycle)?]; self.sql_tx .execute(sql, args) .map_err(|e| DBError::SqliteError(e))?; let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(target_reward_cycle)?, - &block_commit.burn_header_hash, - &block_commit.txid, + let args = params![ + u64_to_sql(target_reward_cycle)?, + block_commit.burn_header_hash, + block_commit.txid, ]; match self.sql_tx.execute(sql, args) { Ok(_) => { info!( - "Set anchor block for reward cycle {} to {},{},{},{}", - target_reward_cycle, - &block_commit.burn_header_hash, - &block_commit.txid, - &block_commit.block_height, - &block_commit.vtxindex + "Setting anchor block for reward cycle {target_reward_cycle}."; + "burn_block_hash" => %block_commit.burn_header_hash, + "stacks_block_hash" => %block_commit.block_header_hash, + "block_commit_txid" => %block_commit.txid, + "block_commit_height" => block_commit.block_height, + "block_commit_vtxindex" => block_commit.vtxindex, ); Ok(()) } @@ -420,7 +421,7 @@ impl<'a> BurnchainDBTransaction<'a> { /// Unmark all block-commit(s) that were anchor block(s) for this reward cycle. 
pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET anchor_block = NULL WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; self.sql_tx .execute(sql, args) .map(|_| ()) @@ -877,14 +878,14 @@ impl<'a> BurnchainDBTransaction<'a> { (burn_block_hash, txid, block_height, vtxindex, anchor_block, anchor_block_descendant, affirmation_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"; let mut stmt = self.sql_tx.prepare(commit_metadata_sql)?; - let args: &[&dyn ToSql] = &[ - &bcm.burn_block_hash, - &bcm.txid, - &u64_to_sql(bcm.block_height)?, - &bcm.vtxindex, - &opt_u64_to_sql(bcm.anchor_block)?, - &opt_u64_to_sql(bcm.anchor_block_descendant)?, - &u64_to_sql(bcm.affirmation_id)?, + let args = params![ + bcm.burn_block_hash, + bcm.txid, + u64_to_sql(bcm.block_height)?, + bcm.vtxindex, + opt_u64_to_sql(bcm.anchor_block)?, + opt_u64_to_sql(bcm.anchor_block_descendant)?, + u64_to_sql(bcm.affirmation_id)?, ]; stmt.execute(args)?; Ok(()) @@ -903,7 +904,7 @@ impl<'a> BurnchainDBTransaction<'a> { for op in block_ops.iter() { let serialized_op = serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp"); - let args: &[&dyn ToSql] = &[&block_header.block_hash, op.txid_ref(), &serialized_op]; + let args = params![block_header.block_hash, op.txid_ref(), serialized_op]; stmt.execute(args)?; } @@ -946,6 +947,8 @@ impl<'a> BurnchainDBTransaction<'a> { BurnchainDB::inner_get_canonical_chain_tip(&self.sql_tx) } + // TODO: add tests from mutation testing results #4837 + #[cfg_attr(test, mutants::skip)] /// You'd only do this in network emergencies, where node operators are expected to declare an /// anchor block missing (or present). Ideally there'd be a smart contract somewhere for this. pub fn set_override_affirmation_map( @@ -956,7 +959,7 @@ impl<'a> BurnchainDBTransaction<'a> { assert_eq!((affirmation_map.len() as u64) + 1, reward_cycle); let qry = "INSERT OR REPLACE INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, &affirmation_map.encode()]; + let args = params![u64_to_sql(reward_cycle)?, affirmation_map.encode()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -965,7 +968,7 @@ impl<'a> BurnchainDBTransaction<'a> { pub fn clear_override_affirmation_map(&self, reward_cycle: u64) -> Result<(), DBError> { let qry = "DELETE FROM overrides WHERE reward_cycle = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -978,7 +981,7 @@ impl BurnchainDB { let exists: i64 = query_row( self.conn(), "SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = ?1", - &[LAST_BURNCHAIN_DB_INDEX], + params![LAST_BURNCHAIN_DB_INDEX], )? 
.unwrap_or(0); if exists == 0 { @@ -1036,7 +1039,7 @@ impl BurnchainDB { db_tx.sql_tx.execute_batch(BURNCHAIN_DB_SCHEMA)?; db_tx.sql_tx.execute( "INSERT INTO db_config (version) VALUES (?1)", - &[&BURNCHAIN_DB_VERSION], + params![&BURNCHAIN_DB_VERSION], )?; let first_block_header = BurnchainBlockHeader { @@ -1118,17 +1121,21 @@ impl BurnchainDB { height: u64, ) -> Result<bool, BurnchainError> { let qry = "SELECT 1 FROM burnchain_db_block_headers WHERE block_height = ?1"; - let args = &[&u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; let res: Option<i64> = query_row(conn, qry, args)?; Ok(res.is_some()) } - pub fn get_burnchain_header( + pub fn get_burnchain_header<B: BurnchainHeaderReader>( conn: &DBConn, + indexer: &B, height: u64, ) -> Result<Option<BurnchainBlockHeader>, BurnchainError> { - let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_height = ?1"; - let args = &[&u64_to_sql(height)?]; + let Some(hdr) = indexer.read_burnchain_header(height)? else { + return Ok(None); + }; + let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ?1"; + let args = params![hdr.block_hash]; let res: Option<BurnchainBlockHeader> = query_row(conn, qry, args)?; Ok(res) } @@ -1141,9 +1148,9 @@ impl BurnchainDB { "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ? LIMIT 1"; let block_ops_qry = "SELECT DISTINCT * FROM burnchain_db_block_ops WHERE block_hash = ?"; - let block_header = query_row(conn, block_header_qry, &[block])? + let block_header = query_row(conn, block_header_qry, params![block])? .ok_or_else(|| BurnchainError::UnknownBlock(block.clone()))?; - let block_ops = query_rows(conn, block_ops_qry, &[block])?; + let block_ops = query_rows(conn, block_ops_qry, params![block])?; Ok(BurnchainBlockData { header: block_header, @@ -1158,7 +1165,7 @@ impl BurnchainDB { ) -> Option<BlockstackOperationType> { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[txid, burn_header_hash]; + let args = params![txid, burn_header_hash]; match query_row(conn, qry, args) { Ok(res) => res, @@ -1177,7 +1184,7 @@ impl BurnchainDB { txid: &Txid, ) -> Option<BlockstackOperationType> { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1"; - let args: &[&dyn ToSql] = &[txid]; + let args = params![txid]; let ops: Vec<BlockstackOperationType> = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); @@ -1248,7 +1255,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result<Option<AffirmationMap>, DBError> { let sql = "SELECT affirmation_map FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + let args = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1257,7 +1264,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result<Option<u64>, DBError> { let sql = "SELECT weight FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + let args = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1266,7 +1273,7 @@ impl BurnchainDB { affirmation_map: &AffirmationMap, ) -> Result<Option<u64>, DBError> { let sql = "SELECT affirmation_id FROM affirmation_maps WHERE affirmation_map = ?1"; - let args: &[&dyn ToSql] = &[&affirmation_map.encode()]; + let args = params![&affirmation_map.encode()]; query_row(conn, sql, args) } @@ -1276,7 +1283,7 @@ impl BurnchainDB { txid: &Txid, ) -> Result<Option<u64>, DBError> { let sql = "SELECT affirmation_id FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = &[burn_header_hash, txid]; + let args = params![burn_header_hash, txid]; query_row(conn, sql, args) } @@ 
-1297,13 +1304,13 @@ impl BurnchainDB { txid: &Txid, ) -> Result<bool, DBError> { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block IS NOT NULL AND burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = &[burn_header_hash, txid]; + let args = params![burn_header_hash, txid]; query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) } pub fn has_anchor_block(conn: &DBConn, reward_cycle: u64) -> Result<bool, DBError> { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; Ok(query_row::<i64, _>(conn, sql, args)?.is_some()) } @@ -1312,7 +1319,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result<Vec<BlockCommitMetadata>, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec<BlockCommitMetadata> = query_rows(conn, sql, args)?; Ok(metadatas) @@ -1324,7 +1331,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result<Option<(LeaderBlockCommitOp, BlockCommitMetadata)>, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec<BlockCommitMetadata> = query_rows(conn, sql, args)?; for metadata in metadatas { @@ -1365,7 +1372,7 @@ impl BurnchainDB { ) -> Result<Option<(LeaderBlockCommitOp, BlockCommitMetadata)>, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1 AND burn_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; + let args = params![u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; if let Some(commit_metadata) = query_row::<BlockCommitMetadata, _>(conn, sql, args)? { let commit = BurnchainDB::get_block_commit( conn, @@ -1413,7 +1420,9 @@ impl BurnchainDB { ) -> Result<Vec<BlockstackOperationType>, BurnchainError> { let header = block.header(); debug!("Storing new burnchain block"; - "burn_header_hash" => %header.block_hash.to_string()); + "burn_block_hash" => %header.block_hash, + "block_height" => header.block_height + ); let mut blockstack_ops = self.get_blockstack_transactions(burnchain, indexer, block, &header, epoch_id); apply_blockstack_txs_safety_checks(header.block_height, &mut blockstack_ops); @@ -1443,7 +1452,7 @@ impl BurnchainDB { vtxindex: u16, ) -> Result<Option<Txid>, DBError> { let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3"; - let args: &[&dyn ToSql] = &[&block_ptr, &vtxindex, &header_hash]; + let args = params![block_ptr, vtxindex, header_hash]; let txid = match query_row(&conn, qry, args) { Ok(Some(txid)) => txid, Ok(None) => { @@ -1489,7 +1498,7 @@ impl BurnchainDB { burn_block_hash: &BurnchainHeaderHash, txid: &Txid, ) -> Result<Option<BlockCommitMetadata>, DBError> { - let args: &[&dyn ToSql] = &[burn_block_hash, txid]; + let args = params![burn_block_hash, txid]; query_row_panic( conn, "SELECT * FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2", @@ -1604,7 +1613,7 @@ impl BurnchainDB { let am_opt: Option<AffirmationMap> = query_row_panic( conn, "SELECT affirmation_map FROM overrides WHERE reward_cycle = ?1", - &[&u64_to_sql(reward_cycle)?], + params![u64_to_sql(reward_cycle)?], || format!("BUG: more than one override affirmation map for the same reward cycle"), )?; if let Some(am) = &am_opt { diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 26511e152cc..30cd9f81eef 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -37,9 +37,11 @@ use self::bitcoin::{ Error as btc_error, }; 
use crate::chainstate::burn::distribution::BurnSamplePoint; -use crate::chainstate::burn::operations::leader_block_commit::OUTPUTS_PER_COMMIT; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, OUTPUTS_PER_COMMIT, +}; use crate::chainstate::burn::operations::{ - BlockstackOperationType, Error as op_error, LeaderKeyRegisterOp, + BlockstackOperationType, Error as op_error, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; @@ -466,6 +468,8 @@ impl PoxConstants { ) // total liquid supply is 40000000000000000 µSTX } + // TODO: add tests from mutation testing results #4838 + #[cfg_attr(test, mutants::skip)] pub fn regtest_default() -> PoxConstants { PoxConstants::new( 5, @@ -646,6 +650,8 @@ pub struct BurnchainStateTransition { pub burn_dist: Vec<BurnSamplePoint>, pub accepted_ops: Vec<BlockstackOperationType>, pub consumed_leader_keys: Vec<LeaderKeyRegisterOp>, + pub windowed_block_commits: Vec<Vec<LeaderBlockCommitOp>>, + pub windowed_missed_commits: Vec<Vec<MissedBlockCommit>>, } /// The burnchain block's state transition's ops: @@ -689,6 +695,8 @@ pub enum Error { CoordinatorClosed, /// Graceful shutdown error ShutdownInitiated, + /// No epoch defined at that height + NoStacksEpoch, } impl fmt::Display for Error { @@ -714,6 +722,10 @@ ), Error::CoordinatorClosed => write!(f, "ChainsCoordinator channel hung up"), Error::ShutdownInitiated => write!(f, "Graceful shutdown was initiated"), + Error::NoStacksEpoch => write!( + f, + "No Stacks epoch is defined at the height being evaluated" + ), } } } @@ -737,6 +749,7 @@ Error::NonCanonicalPoxId(_, _) => None, Error::CoordinatorClosed => None, Error::ShutdownInitiated => None, + Error::NoStacksEpoch => None, } } } diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 97c9366feca..b08d7a097e5 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -153,6 +153,7 @@ fn test_process_block_ops() { let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), @@ -191,6 +192,7 @@ let block_commit_2 = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223").unwrap(), @@ -229,6 +231,7 @@ let block_commit_3 = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224").unwrap(), @@ -547,11 +550,12 @@ fn test_process_block_ops() { // everything will be included let block_opshash_124 = OpsHash::from_txids( - &block_ops_124 + block_ops_124 .clone() .into_iter() .map(|bo| bo.txid()) - .collect(), + .collect::<Vec<_>>() + .as_slice(), ); let block_prev_chs_124 = vec![ block_123_snapshot.consensus_hash.clone(), @@ -777,6 +781,7 @@ fn test_burn_snapshot_sequence() { if i > 0 { let next_block_commit = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes(&vec![ i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git 
a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 9c3b5ee4770..f14243d049d 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -16,11 +16,12 @@ use std::cmp; -use rusqlite::{ToSql, NO_PARAMS}; +use rusqlite::ToSql; use stacks_common::address::AddressHashMode; use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as BtcTx; use stacks_common::deps_common::bitcoin::network::serialize::deserialize; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::*; use super::*; @@ -53,8 +54,10 @@ impl BurnchainDB { &self, block_hash: &BurnchainHeaderHash, ) -> Result<Vec<BlockstackOperationType>, BurnchainError> { + use rusqlite::params; + let sql = "SELECT op FROM burnchain_db_block_ops WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = &[block_hash]; + let args = params![block_hash]; let mut ops: Vec<BlockstackOperationType> = query_rows(&self.conn, sql, args)?; ops.sort_by(|a, b| a.vtxindex().cmp(&b.vtxindex())); Ok(ops) } @@ -231,6 +234,15 @@ fn test_store_and_fetch() { } assert_eq!(&header, &non_canonical_block.header()); + // when we get a block header by its height, it's canonical + for (height, header) in headers.iter().enumerate() { + let hdr = BurnchainDB::get_burnchain_header(burnchain_db.conn(), &headers, height as u64) + .unwrap() + .unwrap(); + assert!(headers.iter().find(|h| **h == hdr).is_some()); + assert_ne!(hdr, non_canonical_block.header()); + } + let looked_up_canon = burnchain_db.get_canonical_chain_tip().unwrap(); assert_eq!(&looked_up_canon, &canonical_block.header()); @@ -506,6 +518,7 @@ pub fn make_simple_block_commit( let block_height = burn_header.block_height; let mut new_op = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: block_hash, new_seed: VRFSeed([1u8; 32]), parent_block_ptr: 0, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index fc7f6993a8f..31e29c0b26e 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -161,10 +161,7 @@ impl TestMiner { } pub fn last_block_commit(&self) -> Option<LeaderBlockCommitOp> { - match self.block_commits.len() { - 0 => None, - x => Some(self.block_commits[x - 1].clone()), - } + self.block_commits.last().cloned() } pub fn block_commit_at(&self, idx: usize) -> Option<LeaderBlockCommitOp> { @@ -405,7 +402,6 @@ impl TestBurnchainBlock { new_seed: Option<VRFSeed>, epoch_marker: u8, ) -> LeaderBlockCommitOp { - let input = (Txid([0; 32]), 0); let pubks = miner .privks .iter() @@ -442,8 +438,22 @@ &last_snapshot_with_sortition.sortition_id, ) .expect("FATAL: failed to read block commit"); + + let input = SortitionDB::get_last_block_commit_by_sender(ic.conn(), &apparent_sender) + .unwrap() + .map(|commit| (commit.txid.clone(), 1 + (commit.commit_outs.len() as u32))) + .unwrap_or((Txid([0x00; 32]), 0)); + + test_debug!("Last input from {} is {:?}", &apparent_sender, &input); + + let mut txop = match get_commit_res { Some(parent) => { + test_debug!( + "Block-commit for {} (burn height {}) builds on leader block-commit {:?}", + block_hash, + self.block_height, + &parent + ); let txop = LeaderBlockCommitOp::new( block_hash, self.block_height, diff --git a/stackslib/src/chainstate/burn/atc.rs b/stackslib/src/chainstate/burn/atc.rs new file mode 100644 index 00000000000..510c5d20327 --- /dev/null +++ b/stackslib/src/chainstate/burn/atc.rs @@ -0,0 +1,1529 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright 
(C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use stacks_common::util::uint::Uint256; + +use crate::stacks_common::util::uint::BitArray; + +/// A fixed-point numerical representation for ATC. The integer and fractional parts are both 64 +/// bits. Internally, this is a Uint256 so that safe addition and multiplication can be done. +/// +/// Bits 0-63 are the fraction. +/// Bits 64-127 are the integer. +/// Bits 128-256 are 0's to facilitate safe addition and multiplication. +/// +/// The reasons we use this instead of f64 for ATC calculations are as follows: +/// * This avoids unrepresentable states, like NaN or +/- INF +/// * This avoids ambiguous states, like +0.0 and -0.0. +/// * This integrates better into the sortition-sampling system, which uses a u256 to represent a +/// probability range (which is what this is going to be used for) +#[derive(Debug, Clone, PartialEq, Copy, Eq, Hash)] +pub(crate) struct AtcRational(pub(crate) Uint256); +impl AtcRational { + /// Construct from a fraction (numerator and denominator) + pub fn frac(num: u64, den: u64) -> Self { + Self((Uint256::from_u64(num) << 64) / Uint256::from_u64(den)) + } + + /// 0 value + pub fn zero() -> Self { + Self(Uint256::zero()) + } + + /// 1 value + pub fn one() -> Self { + Self(Uint256::one() << 64) + } + + /// largest value less than 1 + pub fn one_sup() -> Self { + Self((Uint256::one() << 64) - Uint256::from_u64(1)) + } + + /// Largest possible value (corresponds to u64::MAX.u64::MAX) + pub fn max() -> Self { + Self((Uint256::from_u64(u64::MAX) << 64) | Uint256::from_u64(u64::MAX)) + } + + /// Get integer part + pub fn ipart(&self) -> u64 { + (self.0 >> 64).low_u64() + } + + /// Is this value overflowed? 
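+ /// A value counts as overflowed when it exceeds `max()` (u64::MAX.u64::MAX), i.e. when any bit
+ /// at or above bit 128 is set; the checked `add` and `mul` operations below return None in that case.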
+ pub fn is_overflowed(&self) -> bool { + self.0 > Self::max().0 + } + + /// Checked addition + pub fn add(&self, other: &AtcRational) -> Option<AtcRational> { + // NOTE: this is always safe since u128::MAX + u128::MAX < Uint256::max() + let sum = AtcRational(self.0 + other.0); + if sum.is_overflowed() { + return None; + } + Some(sum) + } + + /// Checked subtraction + pub fn sub(&self, other: &AtcRational) -> Option<AtcRational> { + if self.0 < other.0 { + return None; + } + Some(AtcRational(self.0 - other.0)) + } + + /// Checked multiplication + pub fn mul(&self, other: &AtcRational) -> Option<AtcRational> { + // NOTE: this is always safe since u128::MAX * u128::MAX < Uint256::max() + let prod = AtcRational((self.0 * other.0) >> 64); + if prod.is_overflowed() { + return None; + } + Some(prod) + } + + /// Minimum of self and other + pub fn min(&self, other: &AtcRational) -> Self { + if self.0 < other.0 { + Self(self.0.clone()) + } else { + Self(other.0.clone()) + } + } + + /// Hex representation of the inner bits + pub fn to_hex(&self) -> String { + self.0.to_hex_be() + } + + /// Inner u256 ref + pub fn inner(&self) -> &Uint256 { + &self.0 + } + + /// Inner u256, for conversion to something a BurnSamplePoint can use + pub fn into_inner(self) -> Uint256 { + self.0 + } + + /// Convert to a BurnSamplePoint probability for use in calculating a sortition + pub fn into_sortition_probability(self) -> Uint256 { + // AtcRational's fractional part is only 64 bits, so we need to scale it up so that it occupies the + // upper 64 bits of the burn sample point ranges so as to accurately represent the fraction + // of mining power the null miner has. + let prob_u256 = if self.inner() >= Self::one().inner() { + // prevent left-shift overflow + Self::one_sup().into_inner() << 192 + } else { + self.into_inner() << 192 + }; + prob_u256 + } +} + +/// Pre-calculated 1024-member lookup table for the null miner advantage function, as AtcRational +/// fixed-point integers. The first item corresponds to the value of the function at 0.0, and the +/// last item corresponds to the function at 1.0 - (1.0 / 1024.0). The input to the function is the +/// assumed total commit carryover -- the ratio of what the winning miner paid in this +/// block-commit to the median of what they historically paid (for an epoch-defined search window +/// size). A value greater than 1.0 means that the miner paid all of the assumed commit +/// carry-over, and the null miner has negligible chances of winning. A value less than 1.0 means +/// that the miner underpaid relative to their past performance, and the closer to 0.0 this ratio +/// is, the more likely the null miner wins and this miner loses. +/// +/// This table is generated with `make_null_miner_lookup_table()`.
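+///
+/// Illustration only, not part of the patch: a minimal sketch of how a caller might map an ATC
+/// ratio in [0.0, 1.0) to a table entry, assuming the index is the integer part of `atc * 1024`,
+/// saturating at the last entry. The helper name `null_miner_advantage` is hypothetical.
+///
+/// ```ignore
+/// fn null_miner_advantage(atc: AtcRational) -> AtcRational {
+///     // Scale the [0, 1) ratio up to a table index; the 1024 entries cover that range.
+///     let index = atc
+///         .mul(&AtcRational::frac(1024, 1))
+///         .map(|scaled| scaled.ipart() as usize)
+///         .unwrap_or(ATC_LOOKUP.len() - 1);
+///     // An ATC at or above 1.0 saturates to the final (smallest-advantage) entry.
+///     ATC_LOOKUP[index.min(ATC_LOOKUP.len() - 1)]
+/// }
+/// ```
+///
+/// A table lookup of this kind keeps the sortition path free of floating-point math, which is
+/// the rationale given above for using AtcRational in the first place.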
+pub(crate) const ATC_LOOKUP: [AtcRational; 1024] = [ + AtcRational(Uint256([14665006693661589504, 0, 0, 0])), + AtcRational(Uint256([14663943061084833792, 0, 0, 0])), + AtcRational(Uint256([14662867262262108160, 0, 0, 0])), + AtcRational(Uint256([14661779159858638848, 0, 0, 0])), + AtcRational(Uint256([14660678615031697408, 0, 0, 0])), + AtcRational(Uint256([14659565487415023616, 0, 0, 0])), + AtcRational(Uint256([14658439635103131648, 0, 0, 0])), + AtcRational(Uint256([14657300914635431936, 0, 0, 0])), + AtcRational(Uint256([14656149180980262912, 0, 0, 0])), + AtcRational(Uint256([14654984287518758912, 0, 0, 0])), + AtcRational(Uint256([14653806086028572672, 0, 0, 0])), + AtcRational(Uint256([14652614426667460608, 0, 0, 0])), + AtcRational(Uint256([14651409157956749312, 0, 0, 0])), + AtcRational(Uint256([14650190126764625920, 0, 0, 0])), + AtcRational(Uint256([14648957178289305600, 0, 0, 0])), + AtcRational(Uint256([14647710156042049536, 0, 0, 0])), + AtcRational(Uint256([14646448901830051840, 0, 0, 0])), + AtcRational(Uint256([14645173255739158528, 0, 0, 0])), + AtcRational(Uint256([14643883056116467712, 0, 0, 0])), + AtcRational(Uint256([14642578139552755712, 0, 0, 0])), + AtcRational(Uint256([14641258340864796672, 0, 0, 0])), + AtcRational(Uint256([14639923493077501952, 0, 0, 0])), + AtcRational(Uint256([14638573427405920256, 0, 0, 0])), + AtcRational(Uint256([14637207973237102592, 0, 0, 0])), + AtcRational(Uint256([14635826958111819776, 0, 0, 0])), + AtcRational(Uint256([14634430207706118144, 0, 0, 0])), + AtcRational(Uint256([14633017545812742144, 0, 0, 0])), + AtcRational(Uint256([14631588794322399232, 0, 0, 0])), + AtcRational(Uint256([14630143773204873216, 0, 0, 0])), + AtcRational(Uint256([14628682300490010624, 0, 0, 0])), + AtcRational(Uint256([14627204192248543232, 0, 0, 0])), + AtcRational(Uint256([14625709262572754944, 0, 0, 0])), + AtcRational(Uint256([14624197323557009408, 0, 0, 0])), + AtcRational(Uint256([14622668185278134272, 0, 0, 0])), + AtcRational(Uint256([14621121655775633408, 0, 0, 0])), + AtcRational(Uint256([14619557541031794688, 0, 0, 0])), + AtcRational(Uint256([14617975644951588864, 0, 0, 0])), + AtcRational(Uint256([14616375769342470144, 0, 0, 0])), + AtcRational(Uint256([14614757713894002688, 0, 0, 0])), + AtcRational(Uint256([14613121276157339648, 0, 0, 0])), + AtcRational(Uint256([14611466251524579328, 0, 0, 0])), + AtcRational(Uint256([14609792433207912448, 0, 0, 0])), + AtcRational(Uint256([14608099612218703872, 0, 0, 0])), + AtcRational(Uint256([14606387577346342912, 0, 0, 0])), + AtcRational(Uint256([14604656115137021952, 0, 0, 0])), + AtcRational(Uint256([14602905009872304128, 0, 0, 0])), + AtcRational(Uint256([14601134043547590656, 0, 0, 0])), + AtcRational(Uint256([14599342995850407936, 0, 0, 0])), + AtcRational(Uint256([14597531644138579968, 0, 0, 0])), + AtcRational(Uint256([14595699763418222592, 0, 0, 0])), + AtcRational(Uint256([14593847126321623040, 0, 0, 0])), + AtcRational(Uint256([14591973503084957696, 0, 0, 0])), + AtcRational(Uint256([14590078661525866496, 0, 0, 0])), + AtcRational(Uint256([14588162367020904448, 0, 0, 0])), + AtcRational(Uint256([14586224382482810880, 0, 0, 0])), + AtcRational(Uint256([14584264468337694720, 0, 0, 0])), + AtcRational(Uint256([14582282382502025216, 0, 0, 0])), + AtcRational(Uint256([14580277880359520256, 0, 0, 0])), + AtcRational(Uint256([14578250714737874944, 0, 0, 0])), + AtcRational(Uint256([14576200635885367296, 0, 0, 0])), + AtcRational(Uint256([14574127391447336960, 0, 0, 0])), + 
AtcRational(Uint256([14572030726442487808, 0, 0, 0])), + AtcRational(Uint256([14569910383239120896, 0, 0, 0])), + AtcRational(Uint256([14567766101531174912, 0, 0, 0])), + AtcRational(Uint256([14565597618314184704, 0, 0, 0])), + AtcRational(Uint256([14563404667861078016, 0, 0, 0])), + AtcRational(Uint256([14561186981697867776, 0, 0, 0])), + AtcRational(Uint256([14558944288579205120, 0, 0, 0])), + AtcRational(Uint256([14556676314463823872, 0, 0, 0])), + AtcRational(Uint256([14554382782489843712, 0, 0, 0])), + AtcRational(Uint256([14552063412949977088, 0, 0, 0])), + AtcRational(Uint256([14549717923266603008, 0, 0, 0])), + AtcRational(Uint256([14547346027966732288, 0, 0, 0])), + AtcRational(Uint256([14544947438656860160, 0, 0, 0])), + AtcRational(Uint256([14542521863997716480, 0, 0, 0])), + AtcRational(Uint256([14540069009678876672, 0, 0, 0])), + AtcRational(Uint256([14537588578393323520, 0, 0, 0])), + AtcRational(Uint256([14535080269811841024, 0, 0, 0])), + AtcRational(Uint256([14532543780557377536, 0, 0, 0])), + AtcRational(Uint256([14529978804179232768, 0, 0, 0])), + AtcRational(Uint256([14527385031127242752, 0, 0, 0])), + AtcRational(Uint256([14524762148725782528, 0, 0, 0])), + AtcRational(Uint256([14522109841147760640, 0, 0, 0])), + AtcRational(Uint256([14519427789388460032, 0, 0, 0])), + AtcRational(Uint256([14516715671239366656, 0, 0, 0])), + AtcRational(Uint256([14513973161261858816, 0, 0, 0])), + AtcRational(Uint256([14511199930760869888, 0, 0, 0])), + AtcRational(Uint256([14508395647758436352, 0, 0, 0])), + AtcRational(Uint256([14505559976967245824, 0, 0, 0])), + AtcRational(Uint256([14502692579764047872, 0, 0, 0])), + AtcRational(Uint256([14499793114163054592, 0, 0, 0])), + AtcRational(Uint256([14496861234789287936, 0, 0, 0])), + AtcRational(Uint256([14493896592851855360, 0, 0, 0])), + AtcRational(Uint256([14490898836117196800, 0, 0, 0])), + AtcRational(Uint256([14487867608882292736, 0, 0, 0])), + AtcRational(Uint256([14484802551947833344, 0, 0, 0])), + AtcRational(Uint256([14481703302591363072, 0, 0, 0])), + AtcRational(Uint256([14478569494540392448, 0, 0, 0])), + AtcRational(Uint256([14475400757945503744, 0, 0, 0])), + AtcRational(Uint256([14472196719353440256, 0, 0, 0])), + AtcRational(Uint256([14468957001680179200, 0, 0, 0])), + AtcRational(Uint256([14465681224184016896, 0, 0, 0])), + AtcRational(Uint256([14462369002438653952, 0, 0, 0])), + AtcRational(Uint256([14459019948306282496, 0, 0, 0])), + AtcRational(Uint256([14455633669910710272, 0, 0, 0])), + AtcRational(Uint256([14452209771610484736, 0, 0, 0])), + AtcRational(Uint256([14448747853972076544, 0, 0, 0])), + AtcRational(Uint256([14445247513743073280, 0, 0, 0])), + AtcRational(Uint256([14441708343825438720, 0, 0, 0])), + AtcRational(Uint256([14438129933248808960, 0, 0, 0])), + AtcRational(Uint256([14434511867143868416, 0, 0, 0])), + AtcRational(Uint256([14430853726715774976, 0, 0, 0])), + AtcRational(Uint256([14427155089217667072, 0, 0, 0])), + AtcRational(Uint256([14423415527924258816, 0, 0, 0])), + AtcRational(Uint256([14419634612105521152, 0, 0, 0])), + AtcRational(Uint256([14415811907000477696, 0, 0, 0])), + AtcRational(Uint256([14411946973791092736, 0, 0, 0])), + AtcRational(Uint256([14408039369576282112, 0, 0, 0])), + AtcRational(Uint256([14404088647346073600, 0, 0, 0])), + AtcRational(Uint256([14400094355955869696, 0, 0, 0])), + AtcRational(Uint256([14396056040100884480, 0, 0, 0])), + AtcRational(Uint256([14391973240290742272, 0, 0, 0])), + AtcRational(Uint256([14387845492824211456, 0, 0, 0])), + 
AtcRational(Uint256([14383672329764151296, 0, 0, 0])), + AtcRational(Uint256([14379453278912624640, 0, 0, 0])), + AtcRational(Uint256([14375187863786246144, 0, 0, 0])), + AtcRational(Uint256([14370875603591677952, 0, 0, 0])), + AtcRational(Uint256([14366516013201418240, 0, 0, 0])), + AtcRational(Uint256([14362108603129778176, 0, 0, 0])), + AtcRational(Uint256([14357652879509125120, 0, 0, 0])), + AtcRational(Uint256([14353148344066387968, 0, 0, 0])), + AtcRational(Uint256([14348594494099806208, 0, 0, 0])), + AtcRational(Uint256([14343990822456012800, 0, 0, 0])), + AtcRational(Uint256([14339336817507360768, 0, 0, 0])), + AtcRational(Uint256([14334631963129606144, 0, 0, 0])), + AtcRational(Uint256([14329875738679891968, 0, 0, 0])), + AtcRational(Uint256([14325067618975068160, 0, 0, 0])), + AtcRational(Uint256([14320207074270386176, 0, 0, 0])), + AtcRational(Uint256([14315293570238543872, 0, 0, 0])), + AtcRational(Uint256([14310326567949113344, 0, 0, 0])), + AtcRational(Uint256([14305305523848388608, 0, 0, 0])), + AtcRational(Uint256([14300229889739610112, 0, 0, 0])), + AtcRational(Uint256([14295099112763666432, 0, 0, 0])), + AtcRational(Uint256([14289912635380201472, 0, 0, 0])), + AtcRational(Uint256([14284669895349196800, 0, 0, 0])), + AtcRational(Uint256([14279370325713045504, 0, 0, 0])), + AtcRational(Uint256([14274013354779123712, 0, 0, 0])), + AtcRational(Uint256([14268598406102849536, 0, 0, 0])), + AtcRational(Uint256([14263124898471307264, 0, 0, 0])), + AtcRational(Uint256([14257592245887395840, 0, 0, 0])), + AtcRational(Uint256([14251999857554575360, 0, 0, 0])), + AtcRational(Uint256([14246347137862176768, 0, 0, 0])), + AtcRational(Uint256([14240633486371330048, 0, 0, 0])), + AtcRational(Uint256([14234858297801515008, 0, 0, 0])), + AtcRational(Uint256([14229020962017785856, 0, 0, 0])), + AtcRational(Uint256([14223120864018599936, 0, 0, 0])), + AtcRational(Uint256([14217157383924420608, 0, 0, 0])), + AtcRational(Uint256([14211129896966959104, 0, 0, 0])), + AtcRational(Uint256([14205037773479176192, 0, 0, 0])), + AtcRational(Uint256([14198880378886055936, 0, 0, 0])), + AtcRational(Uint256([14192657073696112640, 0, 0, 0])), + AtcRational(Uint256([14186367213493727232, 0, 0, 0])), + AtcRational(Uint256([14180010148932296704, 0, 0, 0])), + AtcRational(Uint256([14173585225728227328, 0, 0, 0])), + AtcRational(Uint256([14167091784655794176, 0, 0, 0])), + AtcRational(Uint256([14160529161542889472, 0, 0, 0])), + AtcRational(Uint256([14153896687267710976, 0, 0, 0])), + AtcRational(Uint256([14147193687756355584, 0, 0, 0])), + AtcRational(Uint256([14140419483981410304, 0, 0, 0])), + AtcRational(Uint256([14133573391961522176, 0, 0, 0])), + AtcRational(Uint256([14126654722761990144, 0, 0, 0])), + AtcRational(Uint256([14119662782496409600, 0, 0, 0])), + AtcRational(Uint256([14112596872329363456, 0, 0, 0])), + AtcRational(Uint256([14105456288480262144, 0, 0, 0])), + AtcRational(Uint256([14098240322228244480, 0, 0, 0])), + AtcRational(Uint256([14090948259918305280, 0, 0, 0])), + AtcRational(Uint256([14083579382968543232, 0, 0, 0])), + AtcRational(Uint256([14076132967878658048, 0, 0, 0])), + AtcRational(Uint256([14068608286239690752, 0, 0, 0])), + AtcRational(Uint256([14061004604745011200, 0, 0, 0])), + AtcRational(Uint256([14053321185202620416, 0, 0, 0])), + AtcRational(Uint256([14045557284548792320, 0, 0, 0])), + AtcRational(Uint256([14037712154863056896, 0, 0, 0])), + AtcRational(Uint256([14029785043384606720, 0, 0, 0])), + AtcRational(Uint256([14021775192530079744, 0, 0, 0])), + 
AtcRational(Uint256([14013681839912861696, 0, 0, 0])), + AtcRational(Uint256([14005504218363817984, 0, 0, 0])), + AtcRational(Uint256([13997241555953580032, 0, 0, 0])), + AtcRational(Uint256([13988893076016375808, 0, 0, 0])), + AtcRational(Uint256([13980457997175449600, 0, 0, 0])), + AtcRational(Uint256([13971935533370089472, 0, 0, 0])), + AtcRational(Uint256([13963324893884334080, 0, 0, 0])), + AtcRational(Uint256([13954625283377340416, 0, 0, 0])), + AtcRational(Uint256([13945835901915490304, 0, 0, 0])), + AtcRational(Uint256([13936955945006243840, 0, 0, 0])), + AtcRational(Uint256([13927984603633807360, 0, 0, 0])), + AtcRational(Uint256([13918921064296585216, 0, 0, 0])), + AtcRational(Uint256([13909764509046546432, 0, 0, 0])), + AtcRational(Uint256([13900514115530459136, 0, 0, 0])), + AtcRational(Uint256([13891169057033058304, 0, 0, 0])), + AtcRational(Uint256([13881728502522195968, 0, 0, 0])), + AtcRational(Uint256([13872191616696016896, 0, 0, 0])), + AtcRational(Uint256([13862557560032120832, 0, 0, 0])), + AtcRational(Uint256([13852825488838891520, 0, 0, 0])), + AtcRational(Uint256([13842994555308853248, 0, 0, 0])), + AtcRational(Uint256([13833063907574269952, 0, 0, 0])), + AtcRational(Uint256([13823032689764870144, 0, 0, 0])), + AtcRational(Uint256([13812900042067845120, 0, 0, 0])), + AtcRational(Uint256([13802665100790099968, 0, 0, 0])), + AtcRational(Uint256([13792326998422816768, 0, 0, 0])), + AtcRational(Uint256([13781884863708366848, 0, 0, 0])), + AtcRational(Uint256([13771337821709592576, 0, 0, 0])), + AtcRational(Uint256([13760684993881540608, 0, 0, 0])), + AtcRational(Uint256([13749925498145615872, 0, 0, 0])), + AtcRational(Uint256([13739058448966279168, 0, 0, 0])), + AtcRational(Uint256([13728082957430233088, 0, 0, 0])), + AtcRational(Uint256([13716998131328233472, 0, 0, 0])), + AtcRational(Uint256([13705803075239473152, 0, 0, 0])), + AtcRational(Uint256([13694496890618648576, 0, 0, 0])), + AtcRational(Uint256([13683078675885682688, 0, 0, 0])), + AtcRational(Uint256([13671547526518214656, 0, 0, 0])), + AtcRational(Uint256([13659902535146829824, 0, 0, 0])), + AtcRational(Uint256([13648142791653093376, 0, 0, 0])), + AtcRational(Uint256([13636267383270436864, 0, 0, 0])), + AtcRational(Uint256([13624275394687913984, 0, 0, 0])), + AtcRational(Uint256([13612165908156874752, 0, 0, 0])), + AtcRational(Uint256([13599938003600584704, 0, 0, 0])), + AtcRational(Uint256([13587590758726844416, 0, 0, 0])), + AtcRational(Uint256([13575123249143625728, 0, 0, 0])), + AtcRational(Uint256([13562534548477763584, 0, 0, 0])), + AtcRational(Uint256([13549823728496742400, 0, 0, 0])), + AtcRational(Uint256([13536989859233630208, 0, 0, 0])), + AtcRational(Uint256([13524032009115150336, 0, 0, 0])), + AtcRational(Uint256([13510949245092962304, 0, 0, 0])), + AtcRational(Uint256([13497740632778186752, 0, 0, 0])), + AtcRational(Uint256([13484405236579164160, 0, 0, 0])), + AtcRational(Uint256([13470942119842529280, 0, 0, 0])), + AtcRational(Uint256([13457350344997619712, 0, 0, 0])), + AtcRational(Uint256([13443628973704206336, 0, 0, 0])), + AtcRational(Uint256([13429777067003654144, 0, 0, 0])), + AtcRational(Uint256([13415793685473462272, 0, 0, 0])), + AtcRational(Uint256([13401677889385263104, 0, 0, 0])), + AtcRational(Uint256([13387428738866302976, 0, 0, 0])), + AtcRational(Uint256([13373045294064392192, 0, 0, 0])), + AtcRational(Uint256([13358526615316400128, 0, 0, 0])), + AtcRational(Uint256([13343871763320287232, 0, 0, 0])), + AtcRational(Uint256([13329079799310704640, 0, 0, 0])), + 
AtcRational(Uint256([13314149785238190080, 0, 0, 0])), + AtcRational(Uint256([13299080783951974400, 0, 0, 0])), + AtcRational(Uint256([13283871859386413056, 0, 0, 0])), + AtcRational(Uint256([13268522076751075328, 0, 0, 0])), + AtcRational(Uint256([13253030502724497408, 0, 0, 0])), + AtcRational(Uint256([13237396205651617792, 0, 0, 0])), + AtcRational(Uint256([13221618255744899072, 0, 0, 0])), + AtcRational(Uint256([13205695725289166848, 0, 0, 0])), + AtcRational(Uint256([13189627688850184192, 0, 0, 0])), + AtcRational(Uint256([13173413223486908416, 0, 0, 0])), + AtcRational(Uint256([13157051408967542784, 0, 0, 0])), + AtcRational(Uint256([13140541327989270528, 0, 0, 0])), + AtcRational(Uint256([13123882066401785856, 0, 0, 0])), + AtcRational(Uint256([13107072713434537984, 0, 0, 0])), + AtcRational(Uint256([13090112361927747584, 0, 0, 0])), + AtcRational(Uint256([13073000108567144448, 0, 0, 0])), + AtcRational(Uint256([13055735054122481664, 0, 0, 0])), + AtcRational(Uint256([13038316303689742336, 0, 0, 0])), + AtcRational(Uint256([13020742966937124864, 0, 0, 0])), + AtcRational(Uint256([13003014158354718720, 0, 0, 0])), + AtcRational(Uint256([12985128997507874816, 0, 0, 0])), + AtcRational(Uint256([12967086609294301184, 0, 0, 0])), + AtcRational(Uint256([12948886124204806144, 0, 0, 0])), + AtcRational(Uint256([12930526678587715584, 0, 0, 0])), + AtcRational(Uint256([12912007414916904960, 0, 0, 0])), + AtcRational(Uint256([12893327482063446016, 0, 0, 0])), + AtcRational(Uint256([12874486035570843648, 0, 0, 0])), + AtcRational(Uint256([12855482237933809664, 0, 0, 0])), + AtcRational(Uint256([12836315258880561152, 0, 0, 0])), + AtcRational(Uint256([12816984275658594304, 0, 0, 0])), + AtcRational(Uint256([12797488473323913216, 0, 0, 0])), + AtcRational(Uint256([12777827045033641984, 0, 0, 0])), + AtcRational(Uint256([12757999192342022144, 0, 0, 0])), + AtcRational(Uint256([12738004125499680768, 0, 0, 0])), + AtcRational(Uint256([12717841063756201984, 0, 0, 0])), + AtcRational(Uint256([12697509235665854464, 0, 0, 0])), + AtcRational(Uint256([12677007879396530176, 0, 0, 0])), + AtcRational(Uint256([12656336243041691648, 0, 0, 0])), + AtcRational(Uint256([12635493584935419904, 0, 0, 0])), + AtcRational(Uint256([12614479173970364416, 0, 0, 0])), + AtcRational(Uint256([12593292289918617600, 0, 0, 0])), + AtcRational(Uint256([12571932223755370496, 0, 0, 0])), + AtcRational(Uint256([12550398277985329152, 0, 0, 0])), + AtcRational(Uint256([12528689766971766784, 0, 0, 0])), + AtcRational(Uint256([12506806017268160512, 0, 0, 0])), + AtcRational(Uint256([12484746367952306176, 0, 0, 0])), + AtcRational(Uint256([12462510170962810880, 0, 0, 0])), + AtcRational(Uint256([12440096791437899776, 0, 0, 0])), + AtcRational(Uint256([12417505608056395776, 0, 0, 0])), + AtcRational(Uint256([12394736013380814848, 0, 0, 0])), + AtcRational(Uint256([12371787414202433536, 0, 0, 0])), + AtcRational(Uint256([12348659231888226304, 0, 0, 0])), + AtcRational(Uint256([12325350902729566208, 0, 0, 0])), + AtcRational(Uint256([12301861878292580352, 0, 0, 0])), + AtcRational(Uint256([12278191625770014720, 0, 0, 0])), + AtcRational(Uint256([12254339628334479360, 0, 0, 0])), + AtcRational(Uint256([12230305385492973568, 0, 0, 0])), + AtcRational(Uint256([12206088413442545664, 0, 0, 0])), + AtcRational(Uint256([12181688245426927616, 0, 0, 0])), + AtcRational(Uint256([12157104432094023680, 0, 0, 0])), + AtcRational(Uint256([12132336541854107648, 0, 0, 0])), + AtcRational(Uint256([12107384161238581248, 0, 0, 0])), + 
AtcRational(Uint256([12082246895259109376, 0, 0, 0])), + AtcRational(Uint256([12056924367767033856, 0, 0, 0])), + AtcRational(Uint256([12031416221812840448, 0, 0, 0])), + AtcRational(Uint256([12005722120005560320, 0, 0, 0])), + AtcRational(Uint256([11979841744871907328, 0, 0, 0])), + AtcRational(Uint256([11953774799215020032, 0, 0, 0])), + AtcRational(Uint256([11927521006472566784, 0, 0, 0])), + AtcRational(Uint256([11901080111074107392, 0, 0, 0])), + AtcRational(Uint256([11874451878797459456, 0, 0, 0])), + AtcRational(Uint256([11847636097123960832, 0, 0, 0])), + AtcRational(Uint256([11820632575592335360, 0, 0, 0])), + AtcRational(Uint256([11793441146151079936, 0, 0, 0])), + AtcRational(Uint256([11766061663509092352, 0, 0, 0])), + AtcRational(Uint256([11738494005484369920, 0, 0, 0])), + AtcRational(Uint256([11710738073350592512, 0, 0, 0])), + AtcRational(Uint256([11682793792181340160, 0, 0, 0])), + AtcRational(Uint256([11654661111191783424, 0, 0, 0])), + AtcRational(Uint256([11626340004077604864, 0, 0, 0])), + AtcRational(Uint256([11597830469350934528, 0, 0, 0])), + AtcRational(Uint256([11569132530673096704, 0, 0, 0])), + AtcRational(Uint256([11540246237183952896, 0, 0, 0])), + AtcRational(Uint256([11511171663827582976, 0, 0, 0])), + AtcRational(Uint256([11481908911674114048, 0, 0, 0])), + AtcRational(Uint256([11452458108237473792, 0, 0, 0])), + AtcRational(Uint256([11422819407788793856, 0, 0, 0])), + AtcRational(Uint256([11392992991665272832, 0, 0, 0])), + AtcRational(Uint256([11362979068574269440, 0, 0, 0])), + AtcRational(Uint256([11332777874892353536, 0, 0, 0])), + AtcRational(Uint256([11302389674959124480, 0, 0, 0])), + AtcRational(Uint256([11271814761365499904, 0, 0, 0])), + AtcRational(Uint256([11241053455236325376, 0, 0, 0])), + AtcRational(Uint256([11210106106506956800, 0, 0, 0])), + AtcRational(Uint256([11178973094193678336, 0, 0, 0])), + AtcRational(Uint256([11147654826657650688, 0, 0, 0])), + AtcRational(Uint256([11116151741862152192, 0, 0, 0])), + AtcRational(Uint256([11084464307622914048, 0, 0, 0])), + AtcRational(Uint256([11052593021851269120, 0, 0, 0])), + AtcRational(Uint256([11020538412789880832, 0, 0, 0])), + AtcRational(Uint256([10988301039240828928, 0, 0, 0])), + AtcRational(Uint256([10955881490785785856, 0, 0, 0])), + AtcRational(Uint256([10923280387998085120, 0, 0, 0])), + AtcRational(Uint256([10890498382646384640, 0, 0, 0])), + AtcRational(Uint256([10857536157889769472, 0, 0, 0])), + AtcRational(Uint256([10824394428463968256, 0, 0, 0])), + AtcRational(Uint256([10791073940858529792, 0, 0, 0])), + AtcRational(Uint256([10757575473484689408, 0, 0, 0])), + AtcRational(Uint256([10723899836833691648, 0, 0, 0])), + AtcRational(Uint256([10690047873625384960, 0, 0, 0])), + AtcRational(Uint256([10656020458946807808, 0, 0, 0])), + AtcRational(Uint256([10621818500380600320, 0, 0, 0])), + AtcRational(Uint256([10587442938122995712, 0, 0, 0])), + AtcRational(Uint256([10552894745091184640, 0, 0, 0])), + AtcRational(Uint256([10518174927019845632, 0, 0, 0])), + AtcRational(Uint256([10483284522546655232, 0, 0, 0])), + AtcRational(Uint256([10448224603286523904, 0, 0, 0])), + AtcRational(Uint256([10412996273894438912, 0, 0, 0])), + AtcRational(Uint256([10377600672116664320, 0, 0, 0])), + AtcRational(Uint256([10342038968830132224, 0, 0, 0])), + AtcRational(Uint256([10306312368069857280, 0, 0, 0])), + AtcRational(Uint256([10270422107044188160, 0, 0, 0])), + AtcRational(Uint256([10234369456137705472, 0, 0, 0])), + AtcRational(Uint256([10198155718901680128, 0, 0, 0])), + 
AtcRational(Uint256([10161782232031832064, 0, 0, 0])), + AtcRational(Uint256([10125250365333327872, 0, 0, 0])), + AtcRational(Uint256([10088561521672830976, 0, 0, 0])), + AtcRational(Uint256([10051717136917477376, 0, 0, 0])), + AtcRational(Uint256([10014718679860666368, 0, 0, 0])), + AtcRational(Uint256([9977567652134516736, 0, 0, 0])), + AtcRational(Uint256([9940265588108912640, 0, 0, 0])), + AtcRational(Uint256([9902814054777008128, 0, 0, 0])), + AtcRational(Uint256([9865214651627091968, 0, 0, 0])), + AtcRational(Uint256([9827469010500773888, 0, 0, 0])), + AtcRational(Uint256([9789578795437342720, 0, 0, 0])), + AtcRational(Uint256([9751545702504284160, 0, 0, 0])), + AtcRational(Uint256([9713371459613874176, 0, 0, 0])), + AtcRational(Uint256([9675057826325798912, 0, 0, 0])), + AtcRational(Uint256([9636606593635780608, 0, 0, 0])), + AtcRational(Uint256([9598019583750131712, 0, 0, 0])), + AtcRational(Uint256([9559298649846272000, 0, 0, 0])), + AtcRational(Uint256([9520445675819153408, 0, 0, 0])), + AtcRational(Uint256([9481462576013621248, 0, 0, 0])), + AtcRational(Uint256([9442351294942703616, 0, 0, 0])), + AtcRational(Uint256([9403113806991841280, 0, 0, 0])), + AtcRational(Uint256([9363752116109119488, 0, 0, 0])), + AtcRational(Uint256([9324268255481511936, 0, 0, 0])), + AtcRational(Uint256([9284664287197179904, 0, 0, 0])), + AtcRational(Uint256([9244942301893949440, 0, 0, 0])), + AtcRational(Uint256([9205104418393949184, 0, 0, 0])), + AtcRational(Uint256([9165152783324563456, 0, 0, 0])), + AtcRational(Uint256([9125089570725771264, 0, 0, 0])), + AtcRational(Uint256([9084916981643961344, 0, 0, 0])), + AtcRational(Uint256([9044637243712360448, 0, 0, 0])), + AtcRational(Uint256([9004252610718200832, 0, 0, 0])), + AtcRational(Uint256([8963765362156744704, 0, 0, 0])), + AtcRational(Uint256([8923177802772338688, 0, 0, 0])), + AtcRational(Uint256([8882492262086646784, 0, 0, 0])), + AtcRational(Uint256([8841711093914219520, 0, 0, 0])), + AtcRational(Uint256([8800836675865615360, 0, 0, 0])), + AtcRational(Uint256([8759871408838231040, 0, 0, 0])), + AtcRational(Uint256([8718817716495054848, 0, 0, 0])), + AtcRational(Uint256([8677678044731567104, 0, 0, 0])), + AtcRational(Uint256([8636454861130998784, 0, 0, 0])), + AtcRational(Uint256([8595150654408180736, 0, 0, 0])), + AtcRational(Uint256([8553767933842236416, 0, 0, 0])), + AtcRational(Uint256([8512309228698363904, 0, 0, 0])), + AtcRational(Uint256([8470777087638975488, 0, 0, 0])), + AtcRational(Uint256([8429174078124461056, 0, 0, 0])), + AtcRational(Uint256([8387502785803874304, 0, 0, 0])), + AtcRational(Uint256([8345765813895795712, 0, 0, 0])), + AtcRational(Uint256([8303965782559726592, 0, 0, 0])), + AtcRational(Uint256([8262105328258275328, 0, 0, 0])), + AtcRational(Uint256([8220187103110477824, 0, 0, 0])), + AtcRational(Uint256([8178213774236573696, 0, 0, 0])), + AtcRational(Uint256([8136188023094564864, 0, 0, 0])), + AtcRational(Uint256([8094112544808916992, 0, 0, 0])), + AtcRational(Uint256([8051990047491715072, 0, 0, 0])), + AtcRational(Uint256([8009823251556677632, 0, 0, 0])), + AtcRational(Uint256([7967614889026356224, 0, 0, 0])), + AtcRational(Uint256([7925367702832887808, 0, 0, 0])), + AtcRational(Uint256([7883084446112715776, 0, 0, 0])), + AtcRational(Uint256([7840767881495595008, 0, 0, 0])), + AtcRational(Uint256([7798420780388343808, 0, 0, 0])), + AtcRational(Uint256([7756045922253651968, 0, 0, 0])), + AtcRational(Uint256([7713646093884422144, 0, 0, 0])), + AtcRational(Uint256([7671224088673970176, 0, 0, 0])), + 
AtcRational(Uint256([7628782705882552320, 0, 0, 0])), + AtcRational(Uint256([7586324749900575744, 0, 0, 0])), + AtcRational(Uint256([7543853029508941824, 0, 0, 0])), + AtcRational(Uint256([7501370357136906240, 0, 0, 0])), + AtcRational(Uint256([7458879548117898240, 0, 0, 0])), + AtcRational(Uint256([7416383419943693312, 0, 0, 0])), + AtcRational(Uint256([7373884791517374464, 0, 0, 0])), + AtcRational(Uint256([7331386482405493760, 0, 0, 0])), + AtcRational(Uint256([7288891312089871360, 0, 0, 0])), + AtcRational(Uint256([7246402099219427328, 0, 0, 0])), + AtcRational(Uint256([7203921660862483456, 0, 0, 0])), + AtcRational(Uint256([7161452811759982592, 0, 0, 0])), + AtcRational(Uint256([7118998363579975680, 0, 0, 0])), + AtcRational(Uint256([7076561124173879296, 0, 0, 0])), + AtcRational(Uint256([7034143896834856960, 0, 0, 0])), + AtcRational(Uint256([6991749479558778880, 0, 0, 0])), + AtcRational(Uint256([6949380664308144128, 0, 0, 0])), + AtcRational(Uint256([6907040236279402496, 0, 0, 0])), + AtcRational(Uint256([6864730973174070272, 0, 0, 0])), + AtcRational(Uint256([6822455644474029056, 0, 0, 0])), + AtcRational(Uint256([6780217010721434624, 0, 0, 0])), + AtcRational(Uint256([6738017822803616768, 0, 0, 0])), + AtcRational(Uint256([6695860821243351040, 0, 0, 0])), + AtcRational(Uint256([6653748735494901760, 0, 0, 0])), + AtcRational(Uint256([6611684283246219264, 0, 0, 0])), + AtcRational(Uint256([6569670169727631360, 0, 0, 0])), + AtcRational(Uint256([6527709087027459072, 0, 0, 0])), + AtcRational(Uint256([6485803713414843392, 0, 0, 0])), + AtcRational(Uint256([6443956712670195712, 0, 0, 0])), + AtcRational(Uint256([6402170733423590400, 0, 0, 0])), + AtcRational(Uint256([6360448408501444608, 0, 0, 0])), + AtcRational(Uint256([6318792354281820160, 0, 0, 0])), + AtcRational(Uint256([6277205170058672128, 0, 0, 0])), + AtcRational(Uint256([6235689437415347200, 0, 0, 0])), + AtcRational(Uint256([6194247719607663616, 0, 0, 0])), + AtcRational(Uint256([6152882560956841984, 0, 0, 0])), + AtcRational(Uint256([6111596486252597248, 0, 0, 0])), + AtcRational(Uint256([6070392000166668288, 0, 0, 0])), + AtcRational(Uint256([6029271586677042176, 0, 0, 0])), + AtcRational(Uint256([5988237708503158784, 0, 0, 0])), + AtcRational(Uint256([5947292806552320000, 0, 0, 0])), + AtcRational(Uint256([5906439299377565696, 0, 0, 0])), + AtcRational(Uint256([5865679582647235584, 0, 0, 0])), + AtcRational(Uint256([5825016028626446336, 0, 0, 0])), + AtcRational(Uint256([5784450985670685696, 0, 0, 0])), + AtcRational(Uint256([5743986777731734528, 0, 0, 0])), + AtcRational(Uint256([5703625703876088832, 0, 0, 0])), + AtcRational(Uint256([5663370037816086528, 0, 0, 0])), + AtcRational(Uint256([5623222027453882368, 0, 0, 0])), + AtcRational(Uint256([5583183894438436864, 0, 0, 0])), + AtcRational(Uint256([5543257833735676928, 0, 0, 0])), + AtcRational(Uint256([5503446013211941888, 0, 0, 0])), + AtcRational(Uint256([5463750573230858240, 0, 0, 0])), + AtcRational(Uint256([5424173626263745536, 0, 0, 0])), + AtcRational(Uint256([5384717256513666048, 0, 0, 0])), + AtcRational(Uint256([5345383519553192960, 0, 0, 0])), + AtcRational(Uint256([5306174441976003584, 0, 0, 0])), + AtcRational(Uint256([5267092021062338560, 0, 0, 0])), + AtcRational(Uint256([5228138224458407936, 0, 0, 0])), + AtcRational(Uint256([5189314989869789184, 0, 0, 0])), + AtcRational(Uint256([5150624224768840704, 0, 0, 0])), + AtcRational(Uint256([5112067806116179968, 0, 0, 0])), + AtcRational(Uint256([5073647580096222208, 0, 0, 0])), + 
AtcRational(Uint256([5035365361866804224, 0, 0, 0])), + AtcRational(Uint256([4997222935322875904, 0, 0, 0])), + AtcRational(Uint256([4959222052874251264, 0, 0, 0])), + AtcRational(Uint256([4921364435237403648, 0, 0, 0])), + AtcRational(Uint256([4883651771241251840, 0, 0, 0])), + AtcRational(Uint256([4846085717646911488, 0, 0, 0])), + AtcRational(Uint256([4808667898981359616, 0, 0, 0])), + AtcRational(Uint256([4771399907384928256, 0, 0, 0])), + AtcRational(Uint256([4734283302472590336, 0, 0, 0])), + AtcRational(Uint256([4697319611208928256, 0, 0, 0])), + AtcRational(Uint256([4660510327796715520, 0, 0, 0])), + AtcRational(Uint256([4623856913578997760, 0, 0, 0])), + AtcRational(Uint256([4587360796954596352, 0, 0, 0])), + AtcRational(Uint256([4551023373306879488, 0, 0, 0])), + AtcRational(Uint256([4514846004945721344, 0, 0, 0])), + AtcRational(Uint256([4478830021062493696, 0, 0, 0])), + AtcRational(Uint256([4442976717697962496, 0, 0, 0])), + AtcRational(Uint256([4407287357722949632, 0, 0, 0])), + AtcRational(Uint256([4371763170831599616, 0, 0, 0])), + AtcRational(Uint256([4336405353547112960, 0, 0, 0])), + AtcRational(Uint256([4301215069239754752, 0, 0, 0])), + AtcRational(Uint256([4266193448156999680, 0, 0, 0])), + AtcRational(Uint256([4231341587465614848, 0, 0, 0])), + AtcRational(Uint256([4196660551305514496, 0, 0, 0])), + AtcRational(Uint256([4162151370855192064, 0, 0, 0])), + AtcRational(Uint256([4127815044408539136, 0, 0, 0])), + AtcRational(Uint256([4093652537462862336, 0, 0, 0])), + AtcRational(Uint256([4059664782817884160, 0, 0, 0])), + AtcRational(Uint256([4025852680685536768, 0, 0, 0])), + AtcRational(Uint256([3992217098810330624, 0, 0, 0])), + AtcRational(Uint256([3958758872600086528, 0, 0, 0])), + AtcRational(Uint256([3925478805266815488, 0, 0, 0])), + AtcRational(Uint256([3892377667977526784, 0, 0, 0])), + AtcRational(Uint256([3859456200014740992, 0, 0, 0])), + AtcRational(Uint256([3826715108946479616, 0, 0, 0])), + AtcRational(Uint256([3794155070805506048, 0, 0, 0])), + AtcRational(Uint256([3761776730277590016, 0, 0, 0])), + AtcRational(Uint256([3729580700898548736, 0, 0, 0])), + AtcRational(Uint256([3697567565259854336, 0, 0, 0])), + AtcRational(Uint256([3665737875222543872, 0, 0, 0])), + AtcRational(Uint256([3634092152139219456, 0, 0, 0])), + AtcRational(Uint256([3602630887083875840, 0, 0, 0])), + AtcRational(Uint256([3571354541089344000, 0, 0, 0])), + AtcRational(Uint256([3540263545392078336, 0, 0, 0])), + AtcRational(Uint256([3509358301684075008, 0, 0, 0])), + AtcRational(Uint256([3478639182371662336, 0, 0, 0])), + AtcRational(Uint256([3448106530840935936, 0, 0, 0])), + AtcRational(Uint256([3417760661729580032, 0, 0, 0])), + AtcRational(Uint256([3387601861204853760, 0, 0, 0])), + AtcRational(Uint256([3357630387247493120, 0, 0, 0])), + AtcRational(Uint256([3327846469941282304, 0, 0, 0])), + AtcRational(Uint256([3298250311768075776, 0, 0, 0])), + AtcRational(Uint256([3268842087908014080, 0, 0, 0])), + AtcRational(Uint256([3239621946544709632, 0, 0, 0])), + AtcRational(Uint256([3210590009175161344, 0, 0, 0])), + AtcRational(Uint256([3181746370924176384, 0, 0, 0])), + AtcRational(Uint256([3153091100863047680, 0, 0, 0])), + AtcRational(Uint256([3124624242332286464, 0, 0, 0])), + AtcRational(Uint256([3096345813268148736, 0, 0, 0])), + AtcRational(Uint256([3068255806532773376, 0, 0, 0])), + AtcRational(Uint256([3040354190247658496, 0, 0, 0])), + AtcRational(Uint256([3012640908130307584, 0, 0, 0])), + AtcRational(Uint256([2985115879833786880, 0, 0, 0])), + 
AtcRational(Uint256([2957779001289008640, 0, 0, 0])), + AtcRational(Uint256([2930630145049504256, 0, 0, 0])), + AtcRational(Uint256([2903669160638502400, 0, 0, 0])), + AtcRational(Uint256([2876895874898083840, 0, 0, 0])), + AtcRational(Uint256([2850310092340229632, 0, 0, 0])), + AtcRational(Uint256([2823911595499543552, 0, 0, 0])), + AtcRational(Uint256([2797700145287476736, 0, 0, 0])), + AtcRational(Uint256([2771675481347832320, 0, 0, 0])), + AtcRational(Uint256([2745837322413390848, 0, 0, 0])), + AtcRational(Uint256([2720185366663441408, 0, 0, 0])), + AtcRational(Uint256([2694719292082065408, 0, 0, 0])), + AtcRational(Uint256([2669438756816964096, 0, 0, 0])), + AtcRational(Uint256([2644343399538680832, 0, 0, 0])), + AtcRational(Uint256([2619432839800029696, 0, 0, 0])), + AtcRational(Uint256([2594706678395571200, 0, 0, 0])), + AtcRational(Uint256([2570164497720961536, 0, 0, 0])), + AtcRational(Uint256([2545805862132034048, 0, 0, 0])), + AtcRational(Uint256([2521630318303431168, 0, 0, 0])), + AtcRational(Uint256([2497637395586657792, 0, 0, 0])), + AtcRational(Uint256([2473826606367389696, 0, 0, 0])), + AtcRational(Uint256([2450197446421903360, 0, 0, 0])), + AtcRational(Uint256([2426749395272486912, 0, 0, 0])), + AtcRational(Uint256([2403481916541677568, 0, 0, 0])), + AtcRational(Uint256([2380394458305224704, 0, 0, 0])), + AtcRational(Uint256([2357486453443613696, 0, 0, 0])), + AtcRational(Uint256([2334757319992054272, 0, 0, 0])), + AtcRational(Uint256([2312206461488791552, 0, 0, 0])), + AtcRational(Uint256([2289833267321639936, 0, 0, 0])), + AtcRational(Uint256([2267637113072605440, 0, 0, 0])), + AtcRational(Uint256([2245617360860510720, 0, 0, 0])), + AtcRational(Uint256([2223773359681494528, 0, 0, 0])), + AtcRational(Uint256([2202104445747299072, 0, 0, 0])), + AtcRational(Uint256([2180609942821237760, 0, 0, 0])), + AtcRational(Uint256([2159289162551755520, 0, 0, 0])), + AtcRational(Uint256([2138141404803482112, 0, 0, 0])), + AtcRational(Uint256([2117165957985701120, 0, 0, 0])), + AtcRational(Uint256([2096362099378140928, 0, 0, 0])), + AtcRational(Uint256([2075729095454018048, 0, 0, 0])), + AtcRational(Uint256([2055266202200243968, 0, 0, 0])), + AtcRational(Uint256([2034972665434736128, 0, 0, 0])), + AtcRational(Uint256([2014847721120749056, 0, 0, 0])), + AtcRational(Uint256([1994890595678173952, 0, 0, 0])), + AtcRational(Uint256([1975100506291729152, 0, 0, 0])), + AtcRational(Uint256([1955476661215996672, 0, 0, 0])), + AtcRational(Uint256([1936018260077233664, 0, 0, 0])), + AtcRational(Uint256([1916724494171921152, 0, 0, 0])), + AtcRational(Uint256([1897594546761984256, 0, 0, 0])), + AtcRational(Uint256([1878627593366651904, 0, 0, 0])), + AtcRational(Uint256([1859822802050898432, 0, 0, 0])), + AtcRational(Uint256([1841179333710439168, 0, 0, 0])), + AtcRational(Uint256([1822696342353232640, 0, 0, 0])), + AtcRational(Uint256([1804372975377456640, 0, 0, 0])), + AtcRational(Uint256([1786208373845930240, 0, 0, 0])), + AtcRational(Uint256([1768201672756947200, 0, 0, 0])), + AtcRational(Uint256([1750352001311492352, 0, 0, 0])), + AtcRational(Uint256([1732658483176829696, 0, 0, 0])), + AtcRational(Uint256([1715120236746417152, 0, 0, 0])), + AtcRational(Uint256([1697736375396155136, 0, 0, 0])), + AtcRational(Uint256([1680506007736934400, 0, 0, 0])), + AtcRational(Uint256([1663428237863474432, 0, 0, 0])), + AtcRational(Uint256([1646502165599439872, 0, 0, 0])), + AtcRational(Uint256([1629726886738828032, 0, 0, 0])), + AtcRational(Uint256([1613101493283616512, 0, 0, 0])), + 
AtcRational(Uint256([1596625073677668096, 0, 0, 0])), + AtcRational(Uint256([1580296713036883968, 0, 0, 0])), + AtcRational(Uint256([1564115493375614976, 0, 0, 0])), + AtcRational(Uint256([1548080493829320192, 0, 0, 0])), + AtcRational(Uint256([1532190790873484288, 0, 0, 0])), + AtcRational(Uint256([1516445458538788608, 0, 0, 0])), + AtcRational(Uint256([1500843568622554368, 0, 0, 0])), + AtcRational(Uint256([1485384190896454656, 0, 0, 0])), + AtcRational(Uint256([1470066393310513152, 0, 0, 0])), + AtcRational(Uint256([1454889242193393664, 0, 0, 0])), + AtcRational(Uint256([1439851802449000192, 0, 0, 0])), + AtcRational(Uint256([1424953137749395968, 0, 0, 0])), + AtcRational(Uint256([1410192310724064000, 0, 0, 0])), + AtcRational(Uint256([1395568383145516288, 0, 0, 0])), + AtcRational(Uint256([1381080416111280128, 0, 0, 0])), + AtcRational(Uint256([1366727470222276864, 0, 0, 0])), + AtcRational(Uint256([1352508605757614592, 0, 0, 0])), + AtcRational(Uint256([1338422882845812992, 0, 0, 0])), + AtcRational(Uint256([1324469361632493312, 0, 0, 0])), + AtcRational(Uint256([1310647102444549376, 0, 0, 0])), + AtcRational(Uint256([1296955165950824704, 0, 0, 0])), + AtcRational(Uint256([1283392613319330816, 0, 0, 0])), + AtcRational(Uint256([1269958506371023872, 0, 0, 0])), + AtcRational(Uint256([1256651907730177536, 0, 0, 0])), + AtcRational(Uint256([1243471880971367936, 0, 0, 0])), + AtcRational(Uint256([1230417490763117312, 0, 0, 0])), + AtcRational(Uint256([1217487803008212736, 0, 0, 0])), + AtcRational(Uint256([1204681884980741632, 0, 0, 0])), + AtcRational(Uint256([1191998805459865088, 0, 0, 0])), + AtcRational(Uint256([1179437634860375808, 0, 0, 0])), + AtcRational(Uint256([1166997445360058880, 0, 0, 0])), + AtcRational(Uint256([1154677311023903744, 0, 0, 0])), + AtcRational(Uint256([1142476307925186944, 0, 0, 0])), + AtcRational(Uint256([1130393514263474816, 0, 0, 0])), + AtcRational(Uint256([1118428010479570176, 0, 0, 0])), + AtcRational(Uint256([1106578879367446784, 0, 0, 0])), + AtcRational(Uint256([1094845206183200000, 0, 0, 0])), + AtcRational(Uint256([1083226078751057536, 0, 0, 0])), + AtcRational(Uint256([1071720587566481536, 0, 0, 0])), + AtcRational(Uint256([1060327825896404608, 0, 0, 0])), + AtcRational(Uint256([1049046889876627200, 0, 0, 0])), + AtcRational(Uint256([1037876878606426112, 0, 0, 0])), + AtcRational(Uint256([1026816894240403456, 0, 0, 0])), + AtcRational(Uint256([1015866042077617536, 0, 0, 0])), + AtcRational(Uint256([1005023430648028800, 0, 0, 0])), + AtcRational(Uint256([994288171796307968, 0, 0, 0])), + AtcRational(Uint256([983659380763034624, 0, 0, 0])), + AtcRational(Uint256([973136176263332992, 0, 0, 0])), + AtcRational(Uint256([962717680562974336, 0, 0, 0])), + AtcRational(Uint256([952403019551993984, 0, 0, 0])), + AtcRational(Uint256([942191322815853056, 0, 0, 0])), + AtcRational(Uint256([932081723704189696, 0, 0, 0])), + AtcRational(Uint256([922073359397190528, 0, 0, 0])), + AtcRational(Uint256([912165370969629056, 0, 0, 0])), + AtcRational(Uint256([902356903452603136, 0, 0, 0])), + AtcRational(Uint256([892647105893010176, 0, 0, 0])), + AtcRational(Uint256([883035131410800384, 0, 0, 0])), + AtcRational(Uint256([873520137254044800, 0, 0, 0])), + AtcRational(Uint256([864101284851852928, 0, 0, 0])), + AtcRational(Uint256([854777739865181312, 0, 0, 0])), + AtcRational(Uint256([845548672235568384, 0, 0, 0])), + AtcRational(Uint256([836413256231831552, 0, 0, 0])), + AtcRational(Uint256([827370670494766720, 0, 0, 0])), + AtcRational(Uint256([818420098079881728, 0, 
0, 0])), + AtcRational(Uint256([809560726498204800, 0, 0, 0])), + AtcRational(Uint256([800791747755200896, 0, 0, 0])), + AtcRational(Uint256([792112358387835392, 0, 0, 0])), + AtcRational(Uint256([783521759499814016, 0, 0, 0])), + AtcRational(Uint256([775019156795042816, 0, 0, 0])), + AtcRational(Uint256([766603760609335424, 0, 0, 0])), + AtcRational(Uint256([758274785940408960, 0, 0, 0])), + AtcRational(Uint256([750031452476196608, 0, 0, 0])), + AtcRational(Uint256([741872984621515008, 0, 0, 0])), + AtcRational(Uint256([733798611523120256, 0, 0, 0])), + AtcRational(Uint256([725807567093184512, 0, 0, 0])), + AtcRational(Uint256([717899090031224448, 0, 0, 0])), + AtcRational(Uint256([710072423844518784, 0, 0, 0])), + AtcRational(Uint256([702326816867043968, 0, 0, 0])), + AtcRational(Uint256([694661522276962432, 0, 0, 0])), + AtcRational(Uint256([687075798112689920, 0, 0, 0])), + AtcRational(Uint256([679568907287580672, 0, 0, 0])), + AtcRational(Uint256([672140117603256192, 0, 0, 0])), + AtcRational(Uint256([664788701761609984, 0, 0, 0])), + AtcRational(Uint256([657513937375516800, 0, 0, 0])), + AtcRational(Uint256([650315106978278272, 0, 0, 0])), + AtcRational(Uint256([643191498031836288, 0, 0, 0])), + AtcRational(Uint256([636142402933774464, 0, 0, 0])), + AtcRational(Uint256([629167119023148800, 0, 0, 0])), + AtcRational(Uint256([622264948585165440, 0, 0, 0])), + AtcRational(Uint256([615435198854739840, 0, 0, 0])), + AtcRational(Uint256([608677182018960512, 0, 0, 0])), + AtcRational(Uint256([601990215218487424, 0, 0, 0])), + AtcRational(Uint256([595373620547912192, 0, 0, 0])), + AtcRational(Uint256([588826725055103488, 0, 0, 0])), + AtcRational(Uint256([582348860739565568, 0, 0, 0])), + AtcRational(Uint256([575939364549835840, 0, 0, 0])), + AtcRational(Uint256([569597578379946176, 0, 0, 0])), + AtcRational(Uint256([563322849064973184, 0, 0, 0])), + AtcRational(Uint256([557114528375699392, 0, 0, 0])), + AtcRational(Uint256([550971973012414144, 0, 0, 0])), + AtcRational(Uint256([544894544597873792, 0, 0, 0])), + AtcRational(Uint256([538881609669446912, 0, 0, 0])), + AtcRational(Uint256([532932539670464960, 0, 0, 0])), + AtcRational(Uint256([527046710940803776, 0, 0, 0])), + AtcRational(Uint256([521223504706716480, 0, 0, 0])), + AtcRational(Uint256([515462307069940352, 0, 0, 0])), + AtcRational(Uint256([509762508996097024, 0, 0, 0])), + AtcRational(Uint256([504123506302410304, 0, 0, 0])), + AtcRational(Uint256([498544699644759936, 0, 0, 0])), + AtcRational(Uint256([493025494504093248, 0, 0, 0])), + AtcRational(Uint256([487565301172211520, 0, 0, 0])), + AtcRational(Uint256([482163534736955520, 0, 0, 0])), + AtcRational(Uint256([476819615066805056, 0, 0, 0])), + AtcRational(Uint256([471532966794915008, 0, 0, 0])), + AtcRational(Uint256([466303019302601600, 0, 0, 0])), + AtcRational(Uint256([461129206702303360, 0, 0, 0])), + AtcRational(Uint256([456010967820029760, 0, 0, 0])), + AtcRational(Uint256([450947746177316224, 0, 0, 0])), + AtcRational(Uint256([445938989972704576, 0, 0, 0])), + AtcRational(Uint256([440984152062762688, 0, 0, 0])), + AtcRational(Uint256([436082689942662912, 0, 0, 0])), + AtcRational(Uint256([431234065726332992, 0, 0, 0])), + AtcRational(Uint256([426437746126196672, 0, 0, 0])), + AtcRational(Uint256([421693202432519040, 0, 0, 0])), + AtcRational(Uint256([416999910492373440, 0, 0, 0])), + AtcRational(Uint256([412357350688240704, 0, 0, 0])), + AtcRational(Uint256([407765007916260352, 0, 0, 0])), + AtcRational(Uint256([403222371564144896, 0, 0, 0])), + 
AtcRational(Uint256([398728935488772800, 0, 0, 0])), + AtcRational(Uint256([394284197993471488, 0, 0, 0])), + AtcRational(Uint256([389887661805007040, 0, 0, 0])), + AtcRational(Uint256([385538834050291776, 0, 0, 0])), + AtcRational(Uint256([381237226232822592, 0, 0, 0])), + AtcRational(Uint256([376982354208862784, 0, 0, 0])), + AtcRational(Uint256([372773738163379840, 0, 0, 0])), + AtcRational(Uint256([368610902585751744, 0, 0, 0])), + AtcRational(Uint256([364493376245252288, 0, 0, 0])), + AtcRational(Uint256([360420692166327168, 0, 0, 0])), + AtcRational(Uint256([356392387603673216, 0, 0, 0])), + AtcRational(Uint256([352408004017130240, 0, 0, 0])), + AtcRational(Uint256([348467087046397696, 0, 0, 0])), + AtcRational(Uint256([344569186485583936, 0, 0, 0])), + AtcRational(Uint256([340713856257602176, 0, 0, 0])), + AtcRational(Uint256([336900654388419392, 0, 0, 0])), + AtcRational(Uint256([333129142981170944, 0, 0, 0])), + AtcRational(Uint256([329398888190146944, 0, 0, 0])), + AtcRational(Uint256([325709460194663424, 0, 0, 0])), + AtcRational(Uint256([322060433172825088, 0, 0, 0])), + AtcRational(Uint256([318451385275187776, 0, 0, 0])), + AtcRational(Uint256([314881898598331776, 0, 0, 0])), + AtcRational(Uint256([311351559158351808, 0, 0, 0])), + AtcRational(Uint256([307859956864274048, 0, 0, 0])), + AtcRational(Uint256([304406685491404992, 0, 0, 0])), + AtcRational(Uint256([300991342654624192, 0, 0, 0])), + AtcRational(Uint256([297613529781624320, 0, 0, 0])), + AtcRational(Uint256([294272852086108864, 0, 0, 0])), + AtcRational(Uint256([290968918540952256, 0, 0, 0])), + AtcRational(Uint256([287701341851331328, 0, 0, 0])), + AtcRational(Uint256([284469738427833696, 0, 0, 0])), + AtcRational(Uint256([281273728359550304, 0, 0, 0])), + AtcRational(Uint256([278112935387157216, 0, 0, 0])), + AtcRational(Uint256([274986986875995200, 0, 0, 0])), + AtcRational(Uint256([271895513789150592, 0, 0, 0])), + AtcRational(Uint256([268838150660545664, 0, 0, 0])), + AtcRational(Uint256([265814535568041440, 0, 0, 0])), + AtcRational(Uint256([262824310106561728, 0, 0, 0])), + AtcRational(Uint256([259867119361241024, 0, 0, 0])), + AtcRational(Uint256([256942611880603296, 0, 0, 0])), + AtcRational(Uint256([254050439649774752, 0, 0, 0])), + AtcRational(Uint256([251190258063738688, 0, 0, 0])), + AtcRational(Uint256([248361725900633600, 0, 0, 0])), + AtcRational(Uint256([245564505295100640, 0, 0, 0])), + AtcRational(Uint256([242798261711686880, 0, 0, 0])), + AtcRational(Uint256([240062663918305152, 0, 0, 0])), + AtcRational(Uint256([237357383959756160, 0, 0, 0])), + AtcRational(Uint256([234682097131319296, 0, 0, 0])), + AtcRational(Uint256([232036481952410816, 0, 0, 0])), + AtcRational(Uint256([229420220140319360, 0, 0, 0])), + AtcRational(Uint256([226832996584017152, 0, 0, 0])), + AtcRational(Uint256([224274499318053024, 0, 0, 0])), + AtcRational(Uint256([221744419496531680, 0, 0, 0])), + AtcRational(Uint256([219242451367179744, 0, 0, 0])), + AtcRational(Uint256([216768292245502976, 0, 0, 0])), + AtcRational(Uint256([214321642489039520, 0, 0, 0])), + AtcRational(Uint256([211902205471709248, 0, 0, 0])), + AtcRational(Uint256([209509687558263072, 0, 0, 0])), + AtcRational(Uint256([207143798078836928, 0, 0, 0])), + AtcRational(Uint256([204804249303609280, 0, 0, 0])), + AtcRational(Uint256([202490756417568736, 0, 0, 0])), + AtcRational(Uint256([200203037495391232, 0, 0, 0])), + AtcRational(Uint256([197940813476429664, 0, 0, 0])), + AtcRational(Uint256([195703808139820608, 0, 0, 0])), + 
AtcRational(Uint256([193491748079706688, 0, 0, 0])), + AtcRational(Uint256([191304362680578688, 0, 0, 0])), + AtcRational(Uint256([189141384092740352, 0, 0, 0])), + AtcRational(Uint256([187002547207894304, 0, 0, 0])), + AtcRational(Uint256([184887589634855776, 0, 0, 0])), + AtcRational(Uint256([182796251675390752, 0, 0, 0])), + AtcRational(Uint256([180728276300183808, 0, 0, 0])), + AtcRational(Uint256([178683409124936320, 0, 0, 0])), + AtcRational(Uint256([176661398386595648, 0, 0, 0])), + AtcRational(Uint256([174661994919716768, 0, 0, 0])), + AtcRational(Uint256([172684952132960128, 0, 0, 0])), + AtcRational(Uint256([170730025985722752, 0, 0, 0])), + AtcRational(Uint256([168796974964908128, 0, 0, 0])), + AtcRational(Uint256([166885560061832896, 0, 0, 0])), + AtcRational(Uint256([164995544749272480, 0, 0, 0])), + AtcRational(Uint256([163126694958648032, 0, 0, 0])), + AtcRational(Uint256([161278779057352736, 0, 0, 0])), + AtcRational(Uint256([159451567826220640, 0, 0, 0])), + AtcRational(Uint256([157644834437138816, 0, 0, 0])), + AtcRational(Uint256([155858354430802016, 0, 0, 0])), + AtcRational(Uint256([154091905694611360, 0, 0, 0])), + AtcRational(Uint256([152345268440719328, 0, 0, 0])), + AtcRational(Uint256([150618225184218048, 0, 0, 0])), + AtcRational(Uint256([148910560721475488, 0, 0, 0])), + AtcRational(Uint256([147222062108617056, 0, 0, 0])), + AtcRational(Uint256([145552518640153856, 0, 0, 0])), + AtcRational(Uint256([143901721827759536, 0, 0, 0])), + AtcRational(Uint256([142269465379193696, 0, 0, 0])), + AtcRational(Uint256([140655545177373184, 0, 0, 0])), + AtcRational(Uint256([139059759259593184, 0, 0, 0])), + AtcRational(Uint256([137481907796894496, 0, 0, 0])), + AtcRational(Uint256([135921793073581792, 0, 0, 0])), + AtcRational(Uint256([134379219466889200, 0, 0, 0])), + AtcRational(Uint256([132853993426794880, 0, 0, 0])), + AtcRational(Uint256([131345923455985760, 0, 0, 0])), + AtcRational(Uint256([129854820089970032, 0, 0, 0])), + AtcRational(Uint256([128380495877339056, 0, 0, 0])), + AtcRational(Uint256([126922765360178944, 0, 0, 0])), + AtcRational(Uint256([125481445054629696, 0, 0, 0])), + AtcRational(Uint256([124056353431594704, 0, 0, 0])), + AtcRational(Uint256([122647310897597840, 0, 0, 0])), + AtcRational(Uint256([121254139775789056, 0, 0, 0])), + AtcRational(Uint256([119876664287099296, 0, 0, 0])), + AtcRational(Uint256([118514710531542512, 0, 0, 0])), + AtcRational(Uint256([117168106469665536, 0, 0, 0])), + AtcRational(Uint256([115836681904146544, 0, 0, 0])), + AtcRational(Uint256([114520268461539280, 0, 0, 0])), + AtcRational(Uint256([113218699574165632, 0, 0, 0])), + AtcRational(Uint256([111931810462153952, 0, 0, 0])), + AtcRational(Uint256([110659438115623328, 0, 0, 0])), + AtcRational(Uint256([109401421277014816, 0, 0, 0])), + AtcRational(Uint256([108157600423566912, 0, 0, 0])), + AtcRational(Uint256([106927817749936160, 0, 0, 0])), + AtcRational(Uint256([105711917150963008, 0, 0, 0])), + AtcRational(Uint256([104509744204580720, 0, 0, 0])), + AtcRational(Uint256([103321146154867984, 0, 0, 0])), + AtcRational(Uint256([102145971895245168, 0, 0, 0])), + AtcRational(Uint256([100984071951811872, 0, 0, 0])), + AtcRational(Uint256([99835298466827488, 0, 0, 0])), + AtcRational(Uint256([98699505182332368, 0, 0, 0])), + AtcRational(Uint256([97576547423909568, 0, 0, 0])), + AtcRational(Uint256([96466282084587616, 0, 0, 0])), + AtcRational(Uint256([95368567608881936, 0, 0, 0])), + AtcRational(Uint256([94283263976975168, 0, 0, 0])), + AtcRational(Uint256([93210232689036528, 
0, 0, 0])), + AtcRational(Uint256([92149336749677664, 0, 0, 0])), + AtcRational(Uint256([91100440652546432, 0, 0, 0])), + AtcRational(Uint256([90063410365056304, 0, 0, 0])), + AtcRational(Uint256([89038113313251152, 0, 0, 0])), + AtcRational(Uint256([88024418366805744, 0, 0, 0])), + AtcRational(Uint256([87022195824159632, 0, 0, 0])), + AtcRational(Uint256([86031317397784352, 0, 0, 0])), + AtcRational(Uint256([85051656199584336, 0, 0, 0])), + AtcRational(Uint256([84083086726428336, 0, 0, 0])), + AtcRational(Uint256([83125484845813488, 0, 0, 0])), + AtcRational(Uint256([82178727781658848, 0, 0, 0])), + AtcRational(Uint256([81242694100228816, 0, 0, 0])), + AtcRational(Uint256([80317263696186016, 0, 0, 0])), + AtcRational(Uint256([79402317778771824, 0, 0, 0])), + AtcRational(Uint256([78497738858114176, 0, 0, 0])), + AtcRational(Uint256([77603410731662624, 0, 0, 0])), + AtcRational(Uint256([76719218470748448, 0, 0, 0])), + AtcRational(Uint256([75845048407270416, 0, 0, 0])), + AtcRational(Uint256([74980788120504400, 0, 0, 0])), + AtcRational(Uint256([74126326424036208, 0, 0, 0])), + AtcRational(Uint256([73281553352817728, 0, 0, 0])), + AtcRational(Uint256([72446360150344240, 0, 0, 0])), + AtcRational(Uint256([71620639255952600, 0, 0, 0])), + AtcRational(Uint256([70804284292240360, 0, 0, 0])), + AtcRational(Uint256([69997190052603488, 0, 0, 0])), + AtcRational(Uint256([69199252488892648, 0, 0, 0])), + AtcRational(Uint256([68410368699187752, 0, 0, 0])), + AtcRational(Uint256([67630436915688592, 0, 0, 0])), + AtcRational(Uint256([66859356492722160, 0, 0, 0])), + AtcRational(Uint256([66097027894864808, 0, 0, 0])), + AtcRational(Uint256([65343352685178616, 0, 0, 0])), + AtcRational(Uint256([64598233513561880, 0, 0, 0])), + AtcRational(Uint256([63861574105211760, 0, 0, 0])), + AtcRational(Uint256([63133279249198800, 0, 0, 0])), + AtcRational(Uint256([62413254787153008, 0, 0, 0])), + AtcRational(Uint256([61701407602059336, 0, 0, 0])), + AtcRational(Uint256([60997645607163304, 0, 0, 0])), + AtcRational(Uint256([60301877734984648, 0, 0, 0])), + AtcRational(Uint256([59614013926438576, 0, 0, 0])), + AtcRational(Uint256([58933965120064440, 0, 0, 0])), + AtcRational(Uint256([58261643241359936, 0, 0, 0])), + AtcRational(Uint256([57596961192220440, 0, 0, 0])), + AtcRational(Uint256([56939832840483304, 0, 0, 0])), + AtcRational(Uint256([56290173009574848, 0, 0, 0])), + AtcRational(Uint256([55647897468260864, 0, 0, 0])), + AtcRational(Uint256([55012922920498480, 0, 0, 0])), + AtcRational(Uint256([54385166995389032, 0, 0, 0])), + AtcRational(Uint256([53764548237231728, 0, 0, 0])), + AtcRational(Uint256([53150986095676152, 0, 0, 0])), + AtcRational(Uint256([52544400915973480, 0, 0, 0])), + AtcRational(Uint256([51944713929325792, 0, 0, 0])), + AtcRational(Uint256([51351847243332064, 0, 0, 0])), + AtcRational(Uint256([50765723832530176, 0, 0, 0])), + AtcRational(Uint256([50186267529034840, 0, 0, 0])), + AtcRational(Uint256([49613403013269352, 0, 0, 0])), + AtcRational(Uint256([49047055804791736, 0, 0, 0])), + AtcRational(Uint256([48487152253213424, 0, 0, 0])), + AtcRational(Uint256([47933619529210104, 0, 0, 0])), + AtcRational(Uint256([47386385615624248, 0, 0, 0])), + AtcRational(Uint256([46845379298657936, 0, 0, 0])), + AtcRational(Uint256([46310530159155312, 0, 0, 0])), + AtcRational(Uint256([45781768563974600, 0, 0, 0])), + AtcRational(Uint256([45259025657447672, 0, 0, 0])), + AtcRational(Uint256([44742233352927632, 0, 0, 0])), + AtcRational(Uint256([44231324324422752, 0, 0, 0])), + 
AtcRational(Uint256([43726231998316280, 0, 0, 0])), + AtcRational(Uint256([43226890545171720, 0, 0, 0])), + AtcRational(Uint256([42733234871622224, 0, 0, 0])), + AtcRational(Uint256([42245200612343560, 0, 0, 0])), + AtcRational(Uint256([41762724122110312, 0, 0, 0])), + AtcRational(Uint256([41285742467933752, 0, 0, 0])), + AtcRational(Uint256([40814193421281544, 0, 0, 0])), + AtcRational(Uint256([40348015450377768, 0, 0, 0])), + AtcRational(Uint256([39887147712583024, 0, 0, 0])), + AtcRational(Uint256([39431530046853688, 0, 0, 0])), + AtcRational(Uint256([38981102966279480, 0, 0, 0])), + AtcRational(Uint256([38535807650699128, 0, 0, 0])), + AtcRational(Uint256([38095585939392688, 0, 0, 0])), + AtcRational(Uint256([37660380323850216, 0, 0, 0])), + AtcRational(Uint256([37230133940616360, 0, 0, 0])), + AtcRational(Uint256([36804790564209328, 0, 0, 0])), + AtcRational(Uint256([36384294600114552, 0, 0, 0])), + AtcRational(Uint256([35968591077851516, 0, 0, 0])), + AtcRational(Uint256([35557625644113388, 0, 0, 0])), + AtcRational(Uint256([35151344555979076, 0, 0, 0])), + AtcRational(Uint256([34749694674196404, 0, 0, 0])), + AtcRational(Uint256([34352623456536068, 0, 0, 0])), + AtcRational(Uint256([33960078951215948, 0, 0, 0])), + AtcRational(Uint256([33572009790394584, 0, 0, 0])), + AtcRational(Uint256([33188365183733360, 0, 0, 0])), + AtcRational(Uint256([32809094912027156, 0, 0, 0])), + AtcRational(Uint256([32434149320901908, 0, 0, 0])), + AtcRational(Uint256([32063479314579508, 0, 0, 0])), + AtcRational(Uint256([31697036349708460, 0, 0, 0])), + AtcRational(Uint256([31334772429260116, 0, 0, 0])), + AtcRational(Uint256([30976640096490016, 0, 0, 0])), + AtcRational(Uint256([30622592428963244, 0, 0, 0])), + AtcRational(Uint256([30272583032643336, 0, 0, 0])), + AtcRational(Uint256([29926566036044560, 0, 0, 0])), + AtcRational(Uint256([29584496084446084, 0, 0, 0])), + AtcRational(Uint256([29246328334168376, 0, 0, 0])), + AtcRational(Uint256([28912018446910460, 0, 0, 0])), + AtcRational(Uint256([28581522584147772, 0, 0, 0])), + AtcRational(Uint256([28254797401590164, 0, 0, 0])), + AtcRational(Uint256([27931800043699132, 0, 0, 0])), + AtcRational(Uint256([27612488138263732, 0, 0, 0])), + AtcRational(Uint256([27296819791035000, 0, 0, 0])), + AtcRational(Uint256([26984753580417632, 0, 0, 0])), + AtcRational(Uint256([26676248552219052, 0, 0, 0])), + AtcRational(Uint256([26371264214454720, 0, 0, 0])), + AtcRational(Uint256([26069760532209384, 0, 0, 0])), + AtcRational(Uint256([25771697922553848, 0, 0, 0])), + AtcRational(Uint256([25477037249516400, 0, 0, 0])), + AtcRational(Uint256([25185739819108396, 0, 0, 0])), + AtcRational(Uint256([24897767374403864, 0, 0, 0])), + AtcRational(Uint256([24613082090671888, 0, 0, 0])), + AtcRational(Uint256([24331646570561924, 0, 0, 0])), + AtcRational(Uint256([24053423839341064, 0, 0, 0])), + AtcRational(Uint256([23778377340182780, 0, 0, 0])), + AtcRational(Uint256([23506470929506944, 0, 0, 0])), + AtcRational(Uint256([23237668872370196, 0, 0, 0])), + AtcRational(Uint256([22971935837906256, 0, 0, 0])), + AtcRational(Uint256([22709236894815996, 0, 0, 0])), + AtcRational(Uint256([22449537506906248, 0, 0, 0])), + AtcRational(Uint256([22192803528677148, 0, 0, 0])), + AtcRational(Uint256([21939001200957664, 0, 0, 0])), + AtcRational(Uint256([21688097146588316, 0, 0, 0])), + AtcRational(Uint256([21440058366151208, 0, 0, 0])), + AtcRational(Uint256([21194852233746400, 0, 0, 0])), + AtcRational(Uint256([20952446492814320, 0, 0, 0])), + AtcRational(Uint256([20712809252003940, 0, 
0, 0])), + AtcRational(Uint256([20475908981085852, 0, 0, 0])), + AtcRational(Uint256([20241714506910040, 0, 0, 0])), + AtcRational(Uint256([20010195009407928, 0, 0, 0])), + AtcRational(Uint256([19781320017637956, 0, 0, 0])), + AtcRational(Uint256([19555059405874636, 0, 0, 0])), + AtcRational(Uint256([19331383389740252, 0, 0, 0])), + AtcRational(Uint256([19110262522378940, 0, 0, 0])), + AtcRational(Uint256([18891667690672852, 0, 0, 0])), + AtcRational(Uint256([18675570111499620, 0, 0, 0])), + AtcRational(Uint256([18461941328030932, 0, 0, 0])), + AtcRational(Uint256([18250753206071836, 0, 0, 0])), + AtcRational(Uint256([18041977930440052, 0, 0, 0])), + AtcRational(Uint256([17835588001385282, 0, 0, 0])), +]; + +#[cfg(test)] +mod test { + use stacks_common::util::hash::to_hex; + use stacks_common::util::uint::Uint256; + + use crate::chainstate::burn::atc::AtcRational; + use crate::chainstate::burn::BlockSnapshot; + use crate::stacks_common::util::uint::BitArray; + + impl AtcRational { + /// Convert to f64, and panic on conversion failure + pub fn to_f64(&self) -> f64 { + let ipart = self.ipart() as f64; + let fpart = self.0.low_u64() as f64; + ipart + (fpart / (u64::MAX as f64)) + } + + /// Convert from f64 between 0 and 1, panicking on conversion failure. Scales up the f64 so that its + /// fractional parts reside in the lower 64 bits of the AtcRational. + pub fn from_f64_unit(value: f64) -> Self { + if value < 0.0 || value >= 1.0 { + panic!("only usable for values in [0.0, 1.0) range"); + } + + // NOTE: this only changes the exponent, not the mantissa. + // Moreover, u128::from(u64::MAX) + 1 has f64 representation 0x43f0000000000000, so these conversions are safe. + let scaled_value = value * ((u128::from(u64::MAX) + 1) as f64); + + // this is safe, because 0.0 <= value < 1.0, so scaled_value <= u64::MAX + let value_u64 = scaled_value as u64; + Self(Uint256::from_u64(value_u64)) + } + } + + fn check_add(num_1: u64, den_1: u64, num_2: u64, den_2: u64) { + assert!( + (AtcRational::frac(num_1, den_1) + .add(&AtcRational::frac(num_2, den_2)) + .unwrap()) + .to_f64() + .abs() + - (num_1 as f64 / den_1 as f64 + num_2 as f64 / den_2 as f64).abs() + < (1.0 / (1024.0 * 1024.0)) + ); + } + + fn check_mul(num_1: u64, den_1: u64, num_2: u64, den_2: u64) { + assert!( + (AtcRational::frac(num_1, den_1) + .mul(&AtcRational::frac(num_2, den_2)) + .unwrap()) + .to_f64() + .abs() + - ((num_1 as f64 / den_1 as f64) * (num_2 as f64 / den_2 as f64)).abs() + < (1.0 / (1024.0 * 1024.0)) + ); + } + + #[test] + fn test_atc_rational() { + // zero + assert_eq!(AtcRational::zero().into_inner(), Uint256::from_u64(0)); + + // one + assert_eq!(AtcRational::one().into_inner(), Uint256::one() << 64); + + // one_sup + assert_eq!( + AtcRational::one_sup().into_inner(), + (Uint256::one() << 64) - Uint256::from_u64(1) + ); + + // max + assert_eq!( + AtcRational::max().into_inner(), + (Uint256::from_u64(u64::MAX) << 64) | Uint256::from_u64(u64::MAX) + ); + + // ipart + assert_eq!(AtcRational::one().ipart(), 1); + assert_eq!(AtcRational::frac(1, 2).ipart(), 0); + assert_eq!(AtcRational::frac(3, 2).ipart(), 1); + assert_eq!(AtcRational::frac(4, 2).ipart(), 2); + assert_eq!(AtcRational::frac(9999, 10000).ipart(), 0); + + // to_f64 + assert_eq!(AtcRational::one().to_f64(), 1.0); + assert_eq!(AtcRational::zero().to_f64(), 0.0); + assert_eq!(AtcRational::frac(1, 2).to_f64(), 0.5); + assert_eq!(AtcRational::frac(1, 32).to_f64(), 0.03125); + + // from_f64_unit + assert_eq!(AtcRational::from_f64_unit(0.0), AtcRational::zero()); + 
assert_eq!(AtcRational::from_f64_unit(0.5), AtcRational::frac(1, 2)); + assert_eq!( + AtcRational::from_f64_unit(0.03125), + AtcRational::frac(1, 32) + ); + + // is_overflowed + assert!(!AtcRational::max().is_overflowed()); + assert!( + AtcRational(AtcRational::max().into_inner() + Uint256::from_u64(1)).is_overflowed() + ); + assert!(AtcRational::max() + .add(&AtcRational(Uint256::from_u64(1))) + .is_none()); + + // frac constructor produces values between 0 and u64::MAX + assert_eq!(AtcRational::frac(1, 1), AtcRational::one()); + assert_eq!( + AtcRational::frac(1, 2).0, + Uint256::from_u64(u64::MAX / 2) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 4).0, + Uint256::from_u64(u64::MAX / 4) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 8).0, + Uint256::from_u64(u64::MAX / 8) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 16).0, + Uint256::from_u64(u64::MAX / 16) + Uint256::from_u64(1) + ); + assert_eq!( + AtcRational::frac(1, 32).0, + Uint256::from_u64(u64::MAX / 32) + Uint256::from_u64(1) + ); + + // fractions auto-normalize + assert_eq!(AtcRational::frac(2, 4), AtcRational::frac(1, 2)); + assert_eq!(AtcRational::frac(100, 400), AtcRational::frac(1, 4)); + assert_eq!(AtcRational::frac(5, 25), AtcRational::frac(1, 5)); + + // fractions can be added + assert_eq!( + AtcRational::frac(1, 2) + .add(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::one() + ); + assert_eq!( + AtcRational::frac(1, 4) + .add(&AtcRational::frac(1, 4)) + .unwrap(), + AtcRational::frac(1, 2) + ); + assert_eq!( + AtcRational::frac(1, 8) + .add(&AtcRational::frac(1, 8)) + .unwrap(), + AtcRational::frac(1, 4) + ); + assert_eq!( + AtcRational::frac(3, 8) + .add(&AtcRational::frac(3, 8)) + .unwrap(), + AtcRational::frac(3, 4) + ); + assert_eq!( + AtcRational::max().add(&AtcRational(Uint256::from_u64(1))), + None + ); + + // fractions can be subtracted + assert_eq!( + AtcRational::frac(1, 2) + .sub(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::zero() + ); + assert_eq!( + AtcRational::one().sub(&AtcRational::frac(1, 2)).unwrap(), + AtcRational::frac(1, 2) + ); + assert_eq!( + AtcRational::one().sub(&AtcRational::frac(1, 32)).unwrap(), + AtcRational::frac(31, 32) + ); + + // fractions can be multiplied + assert_eq!( + AtcRational::frac(1, 2) + .mul(&AtcRational::frac(1, 2)) + .unwrap(), + AtcRational::frac(1, 4) + ); + assert_eq!( + AtcRational::frac(5, 6) + .mul(&AtcRational::frac(7, 8)) + .unwrap(), + AtcRational::frac(35, 48) + ); + assert_eq!( + AtcRational::frac(100, 2) + .mul(&AtcRational::frac(200, 4)) + .unwrap(), + AtcRational::frac(20000, 8) + ); + assert_eq!( + AtcRational::frac(1, 2) + .mul(&AtcRational::frac(1024, 1)) + .unwrap(), + AtcRational::frac(512, 1) + ); + + assert_eq!( + AtcRational::frac(1, 2).min(&AtcRational::frac(15, 32)), + AtcRational::frac(15, 32) + ); + + // we only do stuff with an AtcRational in the range [0..1), since if the ATC-C is greater + // than 1.0, then the null miner never wins (and thus there's no need to compute the null + // miner probability). + // + // The only time an AtcRational is greater than 1.0 is when we scale it up to the lookup + // table index, which has 1024 items. We check that here as well. 
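The `frac` constructor tests above are consistent with a 64.64-style fixed-point layout: the integer part of an `AtcRational` lives above bit 64, and a fraction `num/den` is stored as `(num << 64) / den`. A minimal sketch of that interpretation, using `u128` in place of the crate's `Uint256`; the helper `frac_q64` is hypothetical, not part of the patch:

```rust
// Sketch only: illustrates the fixed-point encoding implied by the tests
// above, with u128 standing in for Uint256.
fn frac_q64(num: u128, den: u128) -> u128 {
    (num << 64) / den
}

fn main() {
    assert_eq!(frac_q64(1, 1), 1u128 << 64); // one(): integer part is 1
    assert_eq!(frac_q64(1, 2), (u64::MAX as u128) / 2 + 1); // 2^63, as asserted above
    // adding two halves reconstitutes one(), matching the add() tests
    assert_eq!(frac_q64(1, 2) + frac_q64(1, 2), 1u128 << 64);
}
```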
+ for num_1 in 0..=1 { + for den_1 in 1..=1024 { + test_debug!("{}/{}", num_1, den_1); + for num_2 in 0..=1 { + for den_2 in 1..=1024 { + check_add(num_1, den_1, num_2, den_2); + check_mul(num_1, den_1, num_2, den_2); + check_mul(num_1, den_1, 1024, 1); + check_mul(num_2, den_2, 1024, 1); + } + } + } + } + } + + #[test] + #[ignore] + fn print_functions() { + let mut grid: Vec<Vec<char>> = vec![vec![' '; 100]; 102]; + for i in 0..100 { + let f_atc = (i as f64) / 100.0; + let atc = AtcRational::frac(i as u64, 100); + let l_atc = BlockSnapshot::null_miner_logistic(atc).to_f64(); + let p_atc = BlockSnapshot::null_miner_probability(atc).to_f64(); + + // NOTE: columns increase downwards, so flip this + let l_atc_100 = 100 - ((l_atc * 100.0) as usize); + let p_atc_100 = 100 - ((p_atc * 100.0) as usize); + let a_atc_100 = 100 - (((1.0 - f_atc) * 100.0) as usize); + grid[a_atc_100][i] = '$'; + grid[l_atc_100][i] = '#'; + grid[p_atc_100][i] = '^'; + } + for j in 0..100 { + grid[101][j] = '_'; + } + + println!(""); + for row in grid.iter() { + let grid_str: String = row.clone().into_iter().collect(); + println!("|{}", &grid_str); + } + } + + /// Calculate the logistic advantage curve for the null miner. + /// This function's parameters are chosen such that: + /// * if the ATC carryover has diminished by less than 20%, the null miner has negligible + /// chances of winning. This is to avoid punishing honest miners when there are flash blocks. + /// * If the ATC carryover has diminished by between 20% and 80%, the null miner has a + /// better-than-linear probability of winning. That is, if the burnchain MEV miner pays less + /// than X% of the expected carryover (20% <= X < 80%), then their probability of winning is + /// (1) strictly less than X%, and (2) strictly less than any Pr[X% - c] for 0 < c < X. + /// * If the ATC carryover is less than 20%, the null miner has an overwhelmingly likely chance + /// of winning (>95%). + /// + /// The logistic curve fits the points (atc=0.2, null_prob=0.75) and (atc=0.8, null_prob=0.01). + fn null_miner_logistic(atc: f64) -> f64 { + // recall the inverted logistic function: + // + // L + // f(x) = --------------------- + // -k * (x0 - x) + // 1 + e + // + // It is shaped like a *backwards* "S" -- it approaches L as `x` tends towards negative + // infinity, and it approaches 0 as `x` tends towards positive infinity. This function is + // the null miner advantage function, where `x` is the ATC carryover value.
+ // + // We need to derive x0 and k from our two points: + // + // (x1, y1) = (0.2, 0.75) + // (x2, y2) = (0.8, 0.01) + // + // to derive L, x0, and k: + // L = 0.8 + // z = ln(L/y1 - 1) / ln(L/y2 - 1) + // x0 = (x1 - z * x2) / (1 - z) + // k = ln(L/y1 - 1) / (x1 - x0) + // + // The values for x0 and k were generated with the following GNU bc script: + // ``` + // $ cat /tmp/variables.bc + // scale=32 + // supremum=0.8 /* this is L */ + // x1=0.2 + // y1=0.75 + // x2=0.8 + // y2=0.01 + // z=l(supremum/y1 - 1)/l(supremum/y2 -1) + // x0=(x1 - z * x2)/(1 - z) + // k=l(supremum/y1 - 1)/(x1 - x0) + // print "x0 = "; x0 + // print "k = "; k + // ``` + // + // This script evaluates to: + // ``` + // $ bc -l < /tmp/variables.bc + // x0 = .42957690816204645842320195118064 + // k = 11.79583008928205260028158351938437 + // ``` + + let L: f64 = 0.8; + + // truncated f64 + let x0: f64 = 0.42957690816204647; + let k: f64 = 11.795830089282052; + + // natural logarithm constant + let e: f64 = 2.718281828459045; + + let adv = L / (1.0 + e.powf(-k * (x0 - atc))); + adv + } + + #[test] + fn make_null_miner_lookup_table() { + use crate::chainstate::burn::atc::ATC_LOOKUP; + let mut lookup_table = Vec::with_capacity(1024); + for atc in 0..1024 { + let fatc = (atc as f64) / 1024.0; + let lgst_fatc = null_miner_logistic(fatc); + let lgst_rational = AtcRational::from_f64_unit(lgst_fatc); + assert_eq!(ATC_LOOKUP[atc], lgst_rational); + assert_eq!(ATC_LOOKUP[atc].to_f64(), lgst_fatc); + lookup_table.push(lgst_rational); + } + println!("["); + for lt in lookup_table.into_iter() { + let inner = lt.into_inner(); + println!(" AtcRational(Uint256({:?})),", &inner.0); + } + println!("]"); + } +} diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 760188829c2..70f170a60c9 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -40,7 +40,7 @@ impl<'a> SortitionHandleTx<'a> { fn check_transaction( &mut self, burnchain: &Burnchain, - blockstack_op: &BlockstackOperationType, + blockstack_op: &mut BlockstackOperationType, reward_info: Option<&RewardSetInfo>, ) -> Result<(), BurnchainError> { match blockstack_op { @@ -48,12 +48,13 @@ impl<'a> SortitionHandleTx<'a> { op.check(burnchain, self).map_err(|e| { warn!( "REJECTED({}) leader key register {} at {},{}: {:?}", - op.block_height, &op.txid, op.block_height, op.vtxindex, &e + op.block_height, &op.txid, op.block_height, op.vtxindex, &e; + "consensus_hash" => %op.consensus_hash ); BurnchainError::OpError(e) }) } - BlockstackOperationType::LeaderBlockCommit(ref op) => { + BlockstackOperationType::LeaderBlockCommit(ref mut op) => { op.check(burnchain, self, reward_info).map_err(|e| { warn!( "REJECTED({}) leader block commit {} at {},{} (parent {},{}): {:?}", @@ -63,7 +64,8 @@ impl<'a> SortitionHandleTx<'a> { op.vtxindex, op.parent_block_ptr, op.parent_vtxindex, - &e + &e; + "stacks_block_hash" => %op.block_header_hash ); BurnchainError::OpError(e) }) @@ -130,23 +132,6 @@ impl<'a> SortitionHandleTx<'a> { e })?; - let total_burn = state_transition - .accepted_ops - .iter() - .try_fold(0u64, |acc, op| { - let bf = match op { - BlockstackOperationType::LeaderBlockCommit(ref op) => op.burn_fee, - _ => 0, - }; - acc.checked_add(bf) - }); - - let txids = state_transition - .accepted_ops - .iter() - .map(|ref op| op.txid()) - .collect(); - let next_pox = SortitionDB::make_next_pox_id(parent_pox.clone(), next_pox_info.as_ref()); let next_sortition_id =
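As a cross-check on the derivation above, the closed-form expressions for `z`, `x0`, and `k` can be evaluated directly in Rust and compared against the truncated constants embedded in `null_miner_logistic`. A standalone sketch, not part of the patch:

```rust
// Sketch: re-derive the logistic parameters from the two anchor points
// (0.2, 0.75) and (0.8, 0.01) with supremum L = 0.8, per the bc script above.
fn main() {
    let (x1, y1) = (0.2_f64, 0.75_f64);
    let (x2, y2) = (0.8_f64, 0.01_f64);
    let l = 0.8_f64;

    let z = (l / y1 - 1.0).ln() / (l / y2 - 1.0).ln();
    let x0 = (x1 - z * x2) / (1.0 - z);
    let k = (l / y1 - 1.0).ln() / (x1 - x0);

    // agrees with the truncated constants used in null_miner_logistic()
    assert!((x0 - 0.42957690816204647).abs() < 1e-12);
    assert!((k - 11.795830089282052).abs() < 1e-12);

    // spot-check the curve at the two fitted points
    let e = std::f64::consts::E;
    let f = |x: f64| l / (1.0 + e.powf(-k * (x0 - x)));
    assert!((f(0.2) - 0.75).abs() < 1e-9);
    assert!((f(0.8) - 0.01).abs() < 1e-9);
}
```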
SortitionDB::make_next_sortition_id( parent_pox.clone(), @@ -162,9 +147,7 @@ impl<'a> SortitionHandleTx<'a> { &next_pox, parent_snapshot, block_header, - &state_transition.burn_dist, - &txids, - total_burn, + &state_transition, initial_mining_bonus_ustx, ) .map_err(|e| { @@ -278,7 +261,7 @@ impl<'a> SortitionHandleTx<'a> { let mut missed_block_commits = vec![]; // classify and check each transaction - blockstack_txs.retain(|blockstack_op| { + blockstack_txs.retain_mut(|blockstack_op| { match self.check_transaction(burnchain, blockstack_op, reward_set_info) { Ok(_) => true, Err(BurnchainError::OpError(OpError::MissedBlockCommit(missed_op))) => { @@ -423,6 +406,7 @@ mod tests { block_height: 102, burn_parent_modulus: (101 % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], }; let mut burnchain = Burnchain::default_unittest(100, &first_burn_hash); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 8ffda837193..15a3bf56416 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -29,8 +29,8 @@ use rand; use rand::RngCore; use rusqlite::types::ToSql; use rusqlite::{ - Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, - TransactionBehavior, NO_PARAMS, + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, + TransactionBehavior, }; use sha2::{Digest, Sha512_256}; use stacks_common::address::AddressHashMode; @@ -38,21 +38,20 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::*; use stacks_common::util::{get_epoch_time_secs, log}; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::affirmation::{AffirmationMap, AffirmationMapEntry}; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{ - Address, Burnchain, BurnchainBlockHeader, BurnchainRecipient, BurnchainStateTransition, - BurnchainStateTransitionOps, BurnchainTransaction, BurnchainView, Error as BurnchainError, - PoxConstants, PublicKey, Txid, + Address, Burnchain, BurnchainBlockHeader, BurnchainRecipient, BurnchainSigner, + BurnchainStateTransition, BurnchainStateTransitionOps, BurnchainTransaction, BurnchainView, + Error as BurnchainError, PoxConstants, PublicKey, Txid, }; use crate::chainstate::burn::operations::leader_block_commit::{ MissedBlockCommit, RewardSetInfo, OUTPUTS_PER_COMMIT, @@ -67,10 +66,10 @@ use crate::chainstate::burn::{ use crate::chainstate::coordinator::{ Error as CoordinatorError, PoxAnchorBlockStatus, RewardCycleInfo, SortitionDBMigrator, }; -use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; -use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, 
StacksHeaderInfo}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::index::{ @@ -83,7 +82,6 @@ use crate::core::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_MAX, }; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; -use crate::net::Error as NetError; use crate::util_lib::db::{ db_mkdirs, get_ancestor_block_hash, opt_u64_to_sql, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma, table_exists, tx_begin_immediate, tx_busy_handler, @@ -282,6 +280,14 @@ impl FromRow<LeaderBlockCommitOp> for LeaderBlockCommitOp { let burn_parent_modulus: u8 = row.get_unwrap("burn_parent_modulus"); + let punished_str: Option<String> = row.get_unwrap("punished"); + let punished = punished_str + .as_deref() + .map(serde_json::from_str) + .transpose() + .map_err(|e| db_error::SerializationError(e))? + .unwrap_or_else(|| vec![]); + let block_commit = LeaderBlockCommitOp { block_header_hash, new_seed, @@ -301,6 +307,7 @@ impl FromRow<LeaderBlockCommitOp> for LeaderBlockCommitOp { vtxindex, block_height, burn_header_hash, + treatment: punished, }; Ok(block_commit) } @@ -508,7 +515,7 @@ impl FromRow<StacksEpoch> for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "8"; +pub const SORTITION_DB_VERSION: &'static str = "9"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -744,7 +751,10 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ );"#, ]; -const LAST_SORTITION_DB_INDEX: &'static str = "index_vote_for_aggregate_key_burn_header_hash"; +static SORTITION_DB_SCHEMA_9: &[&'static str] = + &[r#"ALTER TABLE block_commits ADD punished TEXT DEFAULT NULL;"#]; + +const LAST_SORTITION_DB_INDEX: &'static str = "index_block_commits_by_sender"; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", "CREATE INDEX IF NOT EXISTS snapshots_block_stacks_hashes ON snapshots(num_sortitions,index_root,winning_stacks_block_hash);", @@ -766,14 +776,34 @@ const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_burn_header_hash_pox_valid ON snapshots(burn_header_hash,pox_valid);", "CREATE INDEX IF NOT EXISTS index_delegate_stx_burn_header_hash ON delegate_stx(burn_header_hash);", "CREATE INDEX IF NOT EXISTS index_vote_for_aggregate_key_burn_header_hash ON vote_for_aggregate_key(burn_header_hash);", + "CREATE INDEX IF NOT EXISTS index_block_commits_by_burn_height ON block_commits(block_height);", + "CREATE INDEX IF NOT EXISTS index_block_commits_by_sender ON block_commits(apparent_sender);" ]; +/// Handle to the sortition database, a MARF'ed sqlite DB on disk. +/// It stores information pertaining to cryptographic sortitions performed in each Bitcoin block -- +/// either to select the next Stacks block (in epoch 2.5 and earlier), or to choose the next Stacks +/// miner (epoch 3.0 and later). pub struct SortitionDB { + /// Whether or not write operations are permitted. Pertains to whether or not transaction + /// objects can be created or schema migrations can happen on this SortitionDB instance. pub readwrite: bool, + /// If true, then while write operations will be permitted, they will not be committed (and may + /// even be skipped). This is not used in production; it's used in the `stacks-inspect` tool + /// to simulate what could happen (e.g.
to replay sortitions with different anti-MEV strategies + /// without corrupting the underlying DB). + pub dryrun: bool, + /// Handle to the MARF which stores an index over each burnchain and PoX fork. pub marf: MARF<SortitionId>, + /// First burnchain block height at which sortitions will be considered. All Stacks epochs + /// besides epoch 1.0 must start at or after this height. pub first_block_height: u64, + /// Hash of the first burnchain block at which sortitions will be considered. pub first_burn_header_hash: BurnchainHeaderHash, + /// PoX constants that pertain to this DB, for purposes of (but not limited to) evaluating PoX + /// reward cycles and evaluating block-commit validity within a PoX reward cycle pub pox_constants: PoxConstants, + /// Path on disk from which this DB was opened (caller-given; not resolved). pub path: String, } @@ -781,6 +811,7 @@ pub struct SortitionDB { pub struct SortitionDBTxContext { pub first_block_height: u64, pub pox_constants: PoxConstants, + pub dryrun: bool, } #[derive(Clone)] @@ -788,6 +819,7 @@ pub struct SortitionHandleContext { pub first_block_height: u64, pub pox_constants: PoxConstants, pub chain_tip: SortitionId, + pub dryrun: bool, } pub type SortitionDBConn<'a> = IndexDBConn<'a, SortitionDBTxContext, SortitionId>; @@ -829,7 +861,7 @@ pub fn get_block_commit_by_txid( txid: &Txid, ) -> Result<Option<LeaderBlockCommitOp>, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND txid = ?2 LIMIT 1"; - let args: &[&dyn ToSql] = &[sort_id, txid]; + let args = params![sort_id, txid]; query_row(conn, qry, args) } @@ -1041,6 +1073,14 @@ pub trait SortitionHandle { /// Returns Err(..) on DB errors fn get_nakamoto_tip(&self) -> Result<Option<(ConsensusHash, BlockHeaderHash, u64)>, db_error>; + /// Get the block ID of the highest-processed Nakamoto block on this history. + fn get_nakamoto_tip_block_id(&self) -> Result<Option<StacksBlockId>, db_error> { + let Some((ch, bhh, _)) = self.get_nakamoto_tip()? else { + return Ok(None); + }; + Ok(Some(StacksBlockId::new(&ch, &bhh))) + } + /// is the given block a descendant of `potential_ancestor`? /// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check /// * potential_ancestor: the stacks block hash of the potential ancestor @@ -1130,6 +1170,7 @@ impl<'a> SortitionHandleTx<'a> { chain_tip: parent_chain_tip.clone(), first_block_height: conn.first_block_height, pox_constants: conn.pox_constants.clone(), + dryrun: conn.dryrun, }, ); @@ -1173,10 +1214,10 @@ impl<'a> SortitionHandleTx<'a> { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[ - &ancestor_snapshot.sortition_id, - &u64_to_sql(key_block_height)?, - &key_vtxindex, + let args = params![ + ancestor_snapshot.sortition_id, + u64_to_sql(key_block_height)?, + key_vtxindex, ]; query_row_panic(self.tx(), qry, args, || { format!( @@ -1538,6 +1579,11 @@ impl<'a> SortitionHandleTx<'a> { reward_set_vrf_seed: &SortitionHash, next_pox_info: Option<&RewardCycleInfo>, ) -> Result<Option<RewardSetInfo>, BurnchainError> { + let allow_nakamoto_punishment = SortitionDB::get_stacks_epoch(self.sqlite(), block_height)? + .ok_or_else(|| BurnchainError::NoStacksEpoch)?
+ .epoch_id + .allows_pox_punishment(); + if let Some(next_pox_info) = next_pox_info { if let PoxAnchorBlockStatus::SelectedAndKnown( ref anchor_block, @@ -1581,11 +1627,14 @@ impl<'a> SortitionHandleTx<'a> { .map(|ix| { let recipient = reward_set.rewarded_addresses[ix as usize].clone(); info!("PoX recipient chosen"; - "recipient" => recipient.to_burnchain_repr(), - "block_height" => block_height); + "recipient" => recipient.to_burnchain_repr(), + "block_height" => block_height, + "anchor_stacks_block_hash" => &anchor_block, + ); (recipient, u16::try_from(ix).unwrap()) }) .collect(), + allow_nakamoto_punishment, })) } else { test_debug!( @@ -1613,13 +1662,16 @@ impl<'a> SortitionHandleTx<'a> { let ix = u16::try_from(ix).unwrap(); let recipient = self.get_reward_set_entry(ix)?; info!("PoX recipient chosen"; - "recipient" => recipient.to_burnchain_repr(), - "block_height" => block_height); + "recipient" => recipient.to_burnchain_repr(), + "block_height" => block_height, + "stacks_block_hash" => %anchor_block + ); recipients.push((recipient, ix)); } Ok(Some(RewardSetInfo { anchor_block, recipients, + allow_nakamoto_punishment, })) } } else { @@ -1635,7 +1687,7 @@ impl<'a> SortitionHandleTx<'a> { sortition_id: &SortitionId, ) -> Result<(Vec<PoxAddress>, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args = params![sortition_id]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec<PoxAddress>, u128) = @@ -1730,11 +1782,11 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_height: u64, ) -> Result<(), db_error> { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = &[ + let args = params![ sort_id, consensus_hash, stacks_block_hash, - &u64_to_sql(stacks_block_height)?, + u64_to_sql(stacks_block_height)?, ]; self.execute(sql, args)?; Ok(()) } @@ -1769,6 +1821,67 @@ impl<'a> SortitionHandleTx<'a> { if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { // Nakamoto blocks are always processed in order since the chain can't fork + // arbitrarily. + // + // However, a "benign" fork can arise when a late tenure-change is processed. This + // would happen if + // + // 1. miner A wins sortition and produces a tenure-change; + // 2. miner B wins sortition, and signers sign its tenure-change; + // 3. miner C wins sortition by confirming miner A's last-block + // + // Depending on the timing of things, signers could end up signing both miner B and + // miner C's tenure-change blocks, which are in conflict. The Stacks node must be able + // to handle this case; it does so simply by processing both blocks (as Stacks forks), + // and letting signers figure out which one is canonical. + // + // As a result, only update the canonical Nakamoto tip if the given block is higher + // than the existing tip for this sortition (because it represents more overall signer + // votes).
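The replacement rule described in the comment above (and implemented in the hunk that follows) reduces to a small pure function. A sketch under the stated semantics; the standalone helper `will_replace` and its argument names are illustrative, not part of the patch:

```rust
// Sketch of the canonical-tip replacement rule: higher Stacks tips win;
// equal-height tips from the same sortition never replace; equal-height
// tips from different sortitions go to the later-signed block (the one
// whose sortition has the greater burn block height).
fn will_replace(
    cur_height: u64,
    new_height: u64,
    same_consensus_hash: bool,
    cur_sortition_height: u64,
    new_sortition_height: u64,
) -> bool {
    if cur_height != new_height {
        return cur_height < new_height;
    }
    if same_consensus_hash {
        return false;
    }
    cur_sortition_height < new_sortition_height
}

fn main() {
    assert!(will_replace(10, 11, false, 100, 101)); // strictly higher tip wins
    assert!(!will_replace(10, 10, true, 100, 100)); // same sortition: keep current
    assert!(will_replace(10, 10, false, 100, 101)); // tie broken by later sortition
}
```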
+ let current_sortition_tip : Option<(ConsensusHash, BlockHeaderHash, u64)> = self.query_row_and_then( + "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ?1 ORDER BY block_height DESC LIMIT 1", + rusqlite::params![&burn_tip.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) + ).optional()?; + + if let Some((cur_ch, cur_bhh, cur_height)) = current_sortition_tip { + let will_replace = if cur_height < stacks_block_height { + true + } else if cur_height > stacks_block_height { + false + } else { + if &cur_ch == consensus_hash { + // same sortition (i.e. nakamoto block) + // no replacement + false + } else { + // tips come from different sortitions + // break ties by going with the latter-signed block + let sn_current = SortitionDB::get_block_snapshot_consensus(self, &cur_ch)? + .ok_or(db_error::NotFoundError)?; + let sn_accepted = + SortitionDB::get_block_snapshot_consensus(self, &consensus_hash)? + .ok_or(db_error::NotFoundError)?; + sn_current.block_height < sn_accepted.block_height + } + }; + + debug!("Setting Stacks tip as accepted"; + "replace?" => will_replace, + "current_tip_consensus_hash" => %cur_ch, + "current_tip_block_header_hash" => %cur_bhh, + "current_tip_block_id" => %StacksBlockId::new(&cur_ch, &cur_bhh), + "current_tip_height" => cur_height, + "accepted_tip_consensus_hash" => %consensus_hash, + "accepted_tip_block_header_hash" => %stacks_block_hash, + "accepted_tip_block_id" => %StacksBlockId::new(consensus_hash, stacks_block_hash), + "accepted_tip_height" => stacks_block_height); + + if !will_replace { + return Ok(()); + } + } + self.update_canonical_stacks_tip( &burn_tip.sortition_id, consensus_hash, @@ -1780,9 +1893,9 @@ impl<'a> SortitionHandleTx<'a> { // in epoch 2.x, where we track canonical stacks tip via the sortition DB let arrival_index = SortitionDB::get_max_arrival_index(self)?; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(stacks_block_height)?, - &u64_to_sql(arrival_index + 1)?, + let args = params![ + u64_to_sql(stacks_block_height)?, + u64_to_sql(arrival_index + 1)?, consensus_hash, stacks_block_hash, ]; @@ -1838,80 +1951,6 @@ impl<'a> SortitionHandleConn<'a> { SortitionHandleConn::open_reader(connection, &sn.sortition_id) } - /// Does the sortition db expect to receive blocks - /// signed by this signer set? - /// - /// This only works if `consensus_hash` is within two reward cycles (4200 blocks) of the - /// sortition pointed to by this handle's sortiton tip. If it isn't, then this - /// method returns Ok(false). This is to prevent a DDoS vector whereby compromised stale - /// Signer keys can be used to blast out lots of Nakamoto blocks that will be accepted - /// but never processed. So, `consensus_hash` can be in the same reward cycle as - /// `self.context.chain_tip`, or the previous, but no earlier. - pub fn expects_signer_signature( - &self, - consensus_hash: &ConsensusHash, - signer_signature: &WSTSSignature, - message: &[u8], - aggregate_public_key: &Point, - ) -> Result<bool, db_error> { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for tip: {:?}", &self.context.chain_tip); - e - })?; - - let ch_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)?
- .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for consensus hash: {:?}", consensus_hash); - e - })?; - - if ch_sn.block_height - + u64::from(self.context.pox_constants.reward_cycle_length) - + u64::from(self.context.pox_constants.prepare_length) - < sn.block_height - { - // too far in the past - debug!("Block with consensus hash {} is too far in the past", consensus_hash; - "consensus_hash" => %consensus_hash, - "block_height" => ch_sn.block_height, - "tip_block_height" => sn.block_height - ); - return Ok(false); - } - - // this given consensus hash must be an ancestor of our chain tip - let ch_at = self - .get_consensus_at(ch_sn.block_height)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No ancestor consensus hash"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash, - "consensus_hash height" => %ch_sn.block_height - ); - e - })?; - - if ch_at != ch_sn.consensus_hash { - // not an ancestor - warn!("Consensus hash is not an ancestor of the sortition tip"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash - ); - return Err(db_error::NotFoundError); - } - - // is this consensus hash in this fork? - if SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)?.is_none() { - return Ok(false); - } - - Ok(signer_signature.verify(aggregate_public_key, message)) - } - pub fn get_reward_set_size_at(&self, sortition_id: &SortitionId) -> Result<u16, db_error> { self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) .map(|x| { @@ -1962,32 +2001,6 @@ impl<'a> SortitionHandleConn<'a> { Ok(anchor_block_txid) } - /// Get the last processed reward cycle. - /// Since we always process a RewardSetInfo at the start of a reward cycle (anchor block or - /// no), this is simply the same as asking which reward cycle this SortitionHandleConn's - /// sortition tip is in. - pub fn get_last_processed_reward_cycle(&self) -> Result<u64, db_error> { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? - .ok_or(db_error::NotFoundError)?; - let rc = self - .context - .pox_constants - .block_height_to_reward_cycle(self.context.first_block_height, sn.block_height) - .expect("FATAL: sortition from before system start"); - let rc_start_block = self - .context - .pox_constants - .reward_cycle_to_block_height(self.context.first_block_height, rc); - let last_rc = if sn.block_height >= rc_start_block { - rc - } else { - // NOTE: the reward cycle is "processed" at reward cycle index 1, not index 0 - rc.saturating_sub(1) - }; - - Ok(last_rc) - } - pub fn get_reward_cycle_unlocks( &mut self, cycle: u64, @@ -2019,6 +2032,7 @@ impl<'a> SortitionHandleConn<'a> { chain_tip: chain_tip.clone(), first_block_height: connection.context.first_block_height, pox_constants: connection.context.pox_constants.clone(), + dryrun: connection.context.dryrun, }, index: &connection.index, }) } @@ -2202,6 +2216,28 @@ impl<'a> SortitionHandleConn<'a> { }) } + /// Get the latest block snapshot on this fork where a sortition occurred. + pub fn get_last_snapshot_with_sortition_from_tip(&self) -> Result<BlockSnapshot, db_error> { + let ancestor_hash = + match self.get_indexed(&self.context.chain_tip, &db_keys::last_sortition())?
{ Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { panic!( "FATAL: corrupt database: failed to parse {} into a hex string", &hex_str ) }), None => { // no prior sortitions, so get the first return self.get_first_block_snapshot(); } }; + + self.get_block_snapshot(&ancestor_hash).map(|snapshot_opt| { + snapshot_opt + .unwrap_or_else(|| panic!("FATAL: corrupt index: no snapshot {}", ancestor_hash)) + }) + } + pub fn get_leader_key_at( &self, key_block_height: u64, @@ -2571,6 +2607,19 @@ impl<'a> SortitionHandleConn<'a> { } } } + + pub fn get_reward_set_payouts_at( + &self, + sortition_id: &SortitionId, + ) -> Result<(Vec<PoxAddress>, u128), db_error> { + let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; + let args = params![sortition_id]; + let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; + + let pox_addrs: (Vec<PoxAddress>, u128) = + serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); + Ok(pox_addrs) + } } // Connection methods @@ -2586,18 +2635,20 @@ impl SortitionDB { SortitionDBTxContext { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ); Ok(index_tx) } - /// Make an indexed connectino + /// Make an indexed connection pub fn index_conn<'a>(&'a self) -> SortitionDBConn<'a> { SortitionDBConn::new( &self.marf, SortitionDBTxContext { first_block_height: self.first_block_height, pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ) } @@ -2609,10 +2660,46 @@ impl SortitionDB { first_block_height: self.first_block_height, chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, ) } + pub fn index_handle_at_block<'a>( + &'a self, + chainstate: &StacksChainState, + stacks_block_id: &StacksBlockId, + ) -> Result<SortitionHandleConn<'a>, db_error> { + let lookup_block_id = if let Some(ref unconfirmed_state) = chainstate.unconfirmed_state { + if &unconfirmed_state.unconfirmed_chain_tip == stacks_block_id { + &unconfirmed_state.confirmed_chain_tip + } else { + stacks_block_id + } + } else { + stacks_block_id + }; + let header = match NakamotoChainState::get_block_header(chainstate.db(), lookup_block_id) { + Ok(Some(x)) => x, + x => { + debug!("Failed to get block header: {:?}", x); + return Err(db_error::NotFoundError); + } + }; + // if it's a nakamoto block, we want to use the burnchain view of the block + let burn_view = match &header.anchored_header { + StacksBlockHeaderTypes::Epoch2(_) => header.consensus_hash, + StacksBlockHeaderTypes::Nakamoto(_) => header.burn_view.ok_or_else(|| { + error!("Loaded nakamoto block header without a burn view"; "block_id" => %stacks_block_id); + db_error::Other("Nakamoto block header without burn view".into()) + })?, + }; + + let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)?
+ .ok_or(db_error::NotFoundError)?; + Ok(self.index_handle(&snapshot.sortition_id)) + } + pub fn tx_handle_begin<'a>( &'a mut self, chain_tip: &SortitionId, @@ -2620,13 +2707,13 @@ impl SortitionDB { if !self.readwrite { return Err(db_error::ReadOnly); } - Ok(SortitionHandleTx::new( &mut self.marf, SortitionHandleContext { first_block_height: self.first_block_height, chain_tip: chain_tip.clone(), pox_constants: self.pox_constants.clone(), + dryrun: self.dryrun, }, )) } @@ -2666,6 +2753,7 @@ impl SortitionDB { path: path.to_string(), marf, readwrite, + dryrun: false, pox_constants, first_block_height, first_burn_header_hash, @@ -2723,6 +2811,7 @@ impl SortitionDB { path: path.to_string(), marf, readwrite, + dryrun: false, first_block_height, pox_constants, first_burn_header_hash: first_burn_hash.clone(), @@ -2799,6 +2888,7 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2833,12 +2923,12 @@ impl SortitionDB { ) -> Result<(), db_error> { let epochs = StacksEpoch::validate_epochs(epochs); for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = &[ - &(epoch.epoch_id as u32), - &u64_to_sql(epoch.start_height)?, - &u64_to_sql(epoch.end_height)?, - &epoch.block_limit, - &epoch.network_epoch, + let args = params![ + (epoch.epoch_id as u32), + u64_to_sql(epoch.start_height)?, + u64_to_sql(epoch.end_height)?, + epoch.block_limit, + epoch.network_epoch, ]; db_tx.execute( "INSERT INTO epochs (epoch_id,start_block_height,end_block_height,block_limit,network_epoch) VALUES (?1,?2,?3,?4,?5)", @@ -2907,12 +2997,12 @@ impl SortitionDB { info!("Replace existing epochs with new epochs"); db_tx.execute("DELETE FROM epochs;", NO_PARAMS)?; for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = &[ - &(epoch.epoch_id as u32), - &u64_to_sql(epoch.start_height)?, - &u64_to_sql(epoch.end_height)?, - &epoch.block_limit, - &epoch.network_epoch, + let args = params![ + (epoch.epoch_id as u32), + u64_to_sql(epoch.start_height)?, + u64_to_sql(epoch.end_height)?, + epoch.block_limit, + epoch.network_epoch, ]; db_tx.execute( "INSERT INTO epochs (epoch_id,start_block_height,end_block_height,block_limit,network_epoch) VALUES (?1,?2,?3,?4,?5)", @@ -2929,8 +3019,8 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result<Option<LeaderBlockCommitOp>, db_error> { let qry = "SELECT * FROM block_commits WHERE txid = ?1 AND sortition_id = ?2"; - let args: [&dyn ToSql; 2] = [&txid, &sortition_id]; - query_row(conn, qry, &args) + let args = params![txid, sortition_id]; + query_row(conn, qry, args) } /// Get the Sortition ID for the burnchain block containing `txid`'s parent.
@@ -2941,7 +3031,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result<Option<SortitionId>, db_error> { let qry = "SELECT parent_sortition_id AS sortition_id FROM block_commit_parents WHERE block_commit_parents.block_commit_txid = ?1 AND block_commit_parents.block_commit_sortition_id = ?2"; - let args: &[&dyn ToSql] = &[txid, sortition_id]; + let args = params![txid, sortition_id]; query_row(conn, qry, args) } @@ -2966,7 +3056,7 @@ impl SortitionDB { height: u64, ) -> Result<Vec<BlockSnapshot>, db_error> { let qry = "SELECT * FROM snapshots WHERE block_height = ?1"; - query_rows(conn, qry, &[u64_to_sql(height)?]) + query_rows(conn, qry, params![u64_to_sql(height)?]) } /// Get all preprocessed reward sets and their associated anchor blocks @@ -3034,86 +3124,29 @@ impl SortitionDB { /// Is a particular database version supported by a given epoch? pub fn is_db_version_supported_in_epoch(epoch: StacksEpochId, version: &str) -> bool { + let version_u32: u32 = version.parse().unwrap_or_else(|e| { + error!("Failed to parse sortdb version as u32: {e}"); + 0 + }); match epoch { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => { - version == "1" - || version == "2" - || version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch2_05 => { - version == "2" - || version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch21 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch22 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch23 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch24 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch25 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch30 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } + StacksEpochId::Epoch20 => version_u32 >= 1, + StacksEpochId::Epoch2_05 => version_u32 >= 2, + StacksEpochId::Epoch21 => version_u32 >= 3, + StacksEpochId::Epoch22 => version_u32 >= 3, + StacksEpochId::Epoch23 => version_u32 >= 3, + StacksEpochId::Epoch24 => version_u32 >= 3, + StacksEpochId::Epoch25 => version_u32 >= 3, + StacksEpochId::Epoch30 => version_u32 >= 3, } } /// Get the database schema version, given a DB connection fn get_schema_version(conn: &Connection) -> Result<Option<String>, db_error> { let version = conn - .query_row( - "SELECT MAX(version) from db_config", - rusqlite::NO_PARAMS, - |row| row.get(0), - ) + .query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { + row.get(0) + }) .optional()?; Ok(version) } @@ -3149,11 +3182,11 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - let typical_rules: &[&dyn ToSql] = &[&(ASTRules::Typical as u8), &0i64]; + let typical_rules = params![(ASTRules::Typical as u8), 0i64]; - let precheck_size_rules: &[&dyn ToSql] = &[ - &(ASTRules::PrecheckSize as u8), - &u64_to_sql(AST_RULES_PRECHECK_SIZE)?, + let precheck_size_rules = params![ + (ASTRules::PrecheckSize as u8), + u64_to_sql(AST_RULES_PRECHECK_SIZE)?, ]; tx.execute(
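The rewritten gate above replaces per-version string matching with a numeric floor per epoch, so future schema bumps are accepted automatically. A sketch of the behavior; the helper `supported_in_epoch21` is illustrative, the real check lives in `is_db_version_supported_in_epoch`:

```rust
// Sketch: numeric version floors replace the old string-equality lists.
// An unparseable version maps to 0 and is therefore never supported.
fn supported_in_epoch21(version: &str) -> bool {
    let v: u32 = version.parse().unwrap_or(0);
    v >= 3
}

fn main() {
    assert!(supported_in_epoch21("3"));
    assert!(supported_in_epoch21("9")); // the new schema version in this patch
    assert!(supported_in_epoch21("10")); // string matching would have rejected this
    assert!(!supported_in_epoch21("2"));
    assert!(!supported_in_epoch21("not-a-number"));
}
```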
@@ -3249,7 +3282,7 @@ impl SortitionDB { // skip if this step was done if table_exists(&tx, "stacks_chain_tips")? { let sql = "SELECT 1 FROM stacks_chain_tips WHERE sortition_id = ?1"; - let args = rusqlite::params![&canonical_tip.sortition_id]; + let args = params![canonical_tip.sortition_id]; if let Ok(Some(_)) = query_row::<i64, _>(&tx, sql, args) { info!("`stacks_chain_tips` appears to have been populated already; skipping this step"); return Ok(()); @@ -3265,11 +3298,11 @@ impl SortitionDB { ); for snapshot in snapshots.into_iter() { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = &[ - &snapshot.sortition_id, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.canonical_stacks_tip_hash, - &u64_to_sql(snapshot.canonical_stacks_tip_height)?, + let args = params![ + snapshot.sortition_id, + snapshot.canonical_stacks_tip_consensus_hash, + snapshot.canonical_stacks_tip_hash, + u64_to_sql(snapshot.canonical_stacks_tip_height)?, ]; tx.execute(sql, args)?; } @@ -3286,11 +3319,18 @@ impl SortitionDB { ) -> Result<(), db_error> { let pox_constants = self.pox_constants.clone(); for rc in 0..=(canonical_tip.block_height / u64::from(pox_constants.reward_cycle_length)) { - if pox_constants.reward_cycle_to_block_height(self.first_block_height, rc) - > canonical_tip.block_height - { + let rc_start = pox_constants.reward_cycle_to_block_height(self.first_block_height, rc); + if rc_start > canonical_tip.block_height { break; } + let epoch_at_height = SortitionDB::get_stacks_epoch(self.conn(), rc_start)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", rc_start)) + .epoch_id; + + if epoch_at_height >= StacksEpochId::Epoch30 { + break; + } + info!("Regenerating reward set for cycle {}", &rc); migrator.regenerate_reward_cycle_info(self, rc)?; } @@ -3335,6 +3375,22 @@ impl SortitionDB { Ok(()) } + #[cfg_attr(test, mutants::skip)] + fn apply_schema_9(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_9 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_replace_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["9"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { @@ -3395,6 +3451,10 @@ impl SortitionDB { tx.commit()?; self.apply_schema_8_migration(migrator.take())?; + } else if version == "8" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_9(&tx.deref(), epochs)?; + tx.commit()?; } else if version == expected_version { let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; @@ -3428,6 +3488,7 @@ impl SortitionDB { path: path.to_string(), marf, readwrite: true, + dryrun: false, first_block_height: migrator.get_burnchain().first_block_height, first_burn_header_hash: migrator.get_burnchain().first_block_hash.clone(), pox_constants: migrator.get_burnchain().pox_constants.clone(), @@ -3465,7 +3526,7 @@ impl SortitionDB { ast_rules: ASTRules, height: u64, ) -> Result<(), db_error> { - let rules: &[&dyn ToSql] = &[&u64_to_sql(height)?, &(ast_rules as u8)]; + let rules = params![u64_to_sql(height)?, (ast_rules as u8)]; tx.execute( "UPDATE ast_rule_heights SET block_height = ?1 WHERE ast_rule_id = ?2", @@ -3506,65 +3567,92 @@ impl SortitionDB { } /// Store a pre-processed reward set.
- /// `sortition_id` is the first sortition ID of the prepare phase + /// `sortition_id` is the first sortition ID of the prepare phase. + /// No-op if the reward set has a selected-and-unknown anchor block. pub fn store_preprocessed_reward_set( sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { - let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; + if !rc_info.is_reward_info_known() { + return Ok(()); + } + let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; + let args = params![sortition_id, rc_json]; sort_tx.execute(sql, args)?; Ok(()) } - /// Figure out the reward cycle for `tip` and lookup the preprocessed - /// reward set (if it exists) for the active reward cycle during `tip` - pub fn get_preprocessed_reward_set_of( + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle() + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( &self, tip: &SortitionId, - ) -> Result<Option<RewardCycleInfo>, db_error> { - let tip_sn = SortitionDB::get_block_snapshot(self.conn(), tip)?.ok_or_else(|| { - error!( - "Could not find snapshot for sortition while fetching reward set"; - "tip_sortition_id" => %tip, - ); - db_error::NotFoundError - })?; - - let reward_cycle_id = self - .pox_constants - .block_height_to_reward_cycle(self.first_block_height, tip_sn.block_height) - .expect("FATAL: stored snapshot with block height < first_block_height"); - - let prepare_phase_start = self - .pox_constants - .reward_cycle_to_block_height(self.first_block_height, reward_cycle_id) - .saturating_sub(self.pox_constants.prepare_length.into()); + reward_cycle_id: u64, + ) -> Result<SortitionId, db_error> { + self.index_conn() + .get_prepare_phase_end_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } - let first_sortition = get_ancestor_sort_id( - &self.index_conn(), - prepare_phase_start, - &tip_sn.sortition_id, - )? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle().
+ pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<SortitionId, db_error> { + self.index_conn() + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_for_reward_cycle(). + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + self.index_conn() + .get_preprocessed_reward_set_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } - Self::get_preprocessed_reward_set(self.conn(), &first_sortition) + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_of(). + pub fn get_preprocessed_reward_set_of( + &self, + tip: &SortitionId, + ) -> Result<RewardCycleInfo, db_error> { + Ok(self.index_conn().get_preprocessed_reward_set_of( + &self.pox_constants, + self.first_block_height, + tip, + )?) } /// Get a pre-processed reward set. @@ -3574,7 +3662,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result<Option<RewardCycleInfo>, db_error> { let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args = params![sortition_id]; let reward_set_opt: Option<String> = sortdb.query_row(sql, args, |row| row.get(0)).optional()?; @@ -3585,6 +3673,25 @@ impl SortitionDB { Ok(rc_info) } + + /// Get the number of entries in the reward set, given a sortition ID within the reward cycle + /// for which this set is active.
+ pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option<u16> { + let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { + return None; + }; + let Some(reward_set) = reward_info.known_selected_anchor_block() else { + return None; + }; + + reward_set + .signers + .clone() + .map(|x| x.len()) + .unwrap_or(0) + .try_into() + .ok() + } } impl<'a> SortitionDBTx<'a> { @@ -3616,6 +3723,7 @@ impl<'a> SortitionDBConn<'a> { first_block_height: self.context.first_block_height.clone(), chain_tip: chain_tip.clone(), pox_constants: self.context.pox_constants.clone(), + dryrun: self.context.dryrun, }, } } @@ -3786,7 +3894,7 @@ impl<'a> SortitionDBConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec<PoxAddress>, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args = params![sortition_id]; let pox_addrs_json: String = query_row(self.conn(), sql, args)?.ok_or(db_error::NotFoundError)?; @@ -3794,6 +3902,123 @@ impl<'a> SortitionDBConn<'a> { serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); Ok(pox_addrs) } + + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. + pub fn get_preprocessed_reward_set_of( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + ) -> Result<RewardCycleInfo, db_error> { + let tip_sn = SortitionDB::get_block_snapshot(self, tip)?.ok_or_else(|| { + error!( + "Could not find snapshot for sortition while fetching reward set"; + "tip_sortition_id" => %tip, + ); + db_error::NotFoundError + })?; + + let reward_cycle_id = pox_constants + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) + .expect("FATAL: stored snapshot with block height < first_block_height"); + + self.get_preprocessed_reward_set_for_reward_cycle( + pox_constants, + first_block_height, + tip, + reward_cycle_id, + ) + .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + } + + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<SortitionId, db_error> { + let prepare_phase_end = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(1); + + let last_sortition = + get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase end ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_end_height" => prepare_phase_end + ); + db_error::NotFoundError + })?; + Ok(last_sortition) + } + + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e.
the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<SortitionId, db_error> { + let prepare_phase_start = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(pox_constants.prepare_length.into()); + + let first_sortition = + get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase start ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_height" => prepare_phase_start + ); + db_error::NotFoundError + })?; + Ok(first_sortition) + } + + /// Get the reward set for a reward cycle, given the reward cycle tip. The reward cycle info + /// will be returned for the reward set in which `tip` belongs (i.e. the reward set calculated + /// in the preceding reward cycle). + /// Return the reward cycle info for this reward cycle, as well as the first prepare-phase + /// sortition ID under which this reward cycle info is stored. + /// Returns Error on DB Error, or if the reward cycle info is not processed yet. + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + let first_sortition = self.get_prepare_phase_start_sortition_id_for_reward_cycle( + pox_constants, + first_block_height, + tip, + reward_cycle_id, + )?; + info!("Fetching preprocessed reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_sortition_id" => %first_sortition, + ); + + Ok(( + SortitionDB::get_preprocessed_reward_set(self, &first_sortition)? + .ok_or(db_error::NotFoundError)?, + first_sortition, + )) + } } // High-level functions used by ChainsCoordinator @@ -3876,21 +4101,21 @@ impl SortitionDB { stacks_block_accepted: Option<bool>, ) -> Result<(), BurnchainError> { if let Some(stacks_block_accepted) = stacks_block_accepted { - let args: &[&dyn ToSql] = &[ + let args = params![ sortition_id, - &u64_to_sql(canonical_stacks_height)?, + u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, canonical_stacks_ch, - &stacks_block_accepted, + stacks_block_accepted, ]; tx.execute( "UPDATE snapshots SET pox_valid = 1, canonical_stacks_tip_height = ?2, canonical_stacks_tip_hash = ?3, canonical_stacks_tip_consensus_hash = ?4, stacks_block_accepted = ?5 WHERE sortition_id = ?1", args )?; } else { - let args: &[&dyn ToSql] = &[ + let args = params![ sortition_id, - &u64_to_sql(canonical_stacks_height)?, + u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, canonical_stacks_ch, ]; @@ -4091,6 +4316,7 @@ impl SortitionDB { next_pox_info: Option<RewardCycleInfo>, announce_to: F, ) -> Result<(BlockSnapshot, BurnchainStateTransition), BurnchainError> { + let dryrun = self.dryrun; let parent_sort_id = self .get_sortition_id(&burn_header.parent_block_hash, from_tip)? .ok_or_else(|| { @@ -4170,14 +4396,19 @@ impl SortitionDB { initial_mining_bonus, )?; - sortition_db_handle.store_transition_ops(&new_snapshot.0.sortition_id, &new_snapshot.1)?; + if !dryrun { + sortition_db_handle + .store_transition_ops(&new_snapshot.0.sortition_id, &new_snapshot.1)?; + } announce_to(reward_set_info); - // commit everything!
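The prepare-phase boundaries used above reduce to simple arithmetic on reward cycle boundaries. A sketch under the simplifying assumption that `reward_cycle_to_block_height(first, rc)` maps to `first + rc * reward_cycle_length`; the constants below are illustrative, not normative:

```rust
// Sketch: the prepare phase that selects reward cycle `rc` occupies the
// last PREPARE_LENGTH burn blocks of cycle rc - 1, so its start is the
// cycle boundary minus PREPARE_LENGTH and its end is the boundary minus 1.
const FIRST_BLOCK_HEIGHT: u64 = 100; // illustrative
const REWARD_CYCLE_LENGTH: u64 = 2100; // illustrative
const PREPARE_LENGTH: u64 = 100; // illustrative

fn reward_cycle_to_block_height(rc: u64) -> u64 {
    FIRST_BLOCK_HEIGHT + rc * REWARD_CYCLE_LENGTH // assumed mapping
}

fn main() {
    let rc = 5;
    let rc_start = reward_cycle_to_block_height(rc);
    let prepare_phase_start = rc_start.saturating_sub(PREPARE_LENGTH);
    let prepare_phase_end = rc_start.saturating_sub(1);
    assert_eq!(rc_start, 10_600);
    assert_eq!(prepare_phase_start, 10_500); // first prepare-phase sortition
    assert_eq!(prepare_phase_end, 10_599); // last prepare-phase sortition
}
```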
-        sortition_db_handle.commit().expect(
-            "Failed to commit to sortition db after announcing reward set info, state corrupted.",
-        );
+        if !dryrun {
+            // commit everything!
+            sortition_db_handle.commit().expect(
+                "Failed to commit to sortition db after announcing reward set info, state corrupted.",
+            );
+        }
 
         Ok((new_snapshot.0, new_snapshot.1))
     }
@@ -4449,7 +4680,7 @@ impl SortitionDB {
         burnchain_header_hash: &BurnchainHeaderHash,
     ) -> Result<Vec<BurnchainHeaderHash>, db_error> {
         let sql = "SELECT parent_burn_header_hash AS burn_header_hash FROM snapshots WHERE burn_header_hash = ?1";
-        let args: &[&dyn ToSql] = &[burnchain_header_hash];
+        let args = params![burnchain_header_hash];
         let mut rows = query_rows::<BurnchainHeaderHash, _>(conn, sql, args)?;
 
         // there can be more than one if there was a PoX reorg. If so, make sure they're _all the
@@ -4505,12 +4736,25 @@ impl SortitionDB {
         Ok(ret)
     }
 
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been known at the time of original block assembly.
     pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> {
         let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap();
         self.index_handle(&sortition_id)
     }
 
+    /// Open an index handle at the given consensus hash
+    /// Returns a db_error::NotFoundError if `ch` cannot be found
+    pub fn index_handle_at_ch<'a>(
+        &'a self,
+        ch: &ConsensusHash,
+    ) -> Result<SortitionHandleConn<'a>, db_error> {
+        let sortition_id = Self::get_sortition_id_by_consensus(self.conn(), ch)?
+            .ok_or_else(|| db_error::NotFoundError)?;
+        Ok(self.index_handle(&sortition_id))
+    }
+
     /// Open a tx handle at the burn chain tip
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been known at the time of original block assembly.
     pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> {
         let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap();
         self.tx_handle_begin(&sortition_id).unwrap()
@@ -4520,6 +4764,7 @@ impl SortitionDB {
     /// Returns Ok(Some(tip info)) on success
     /// Returns Ok(None) if there are no Nakamoto blocks in this tip
     /// Returns Err(..) on other DB error
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been known at the time of original block assembly.
     pub fn get_canonical_nakamoto_tip_hash_and_height(
         conn: &Connection,
         tip: &BlockSnapshot,
@@ -4544,6 +4789,7 @@ impl SortitionDB {
     }
 
     /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip.
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been known at the time of original block assembly.
     pub fn get_canonical_stacks_chain_tip_hash_and_height(
         conn: &Connection,
     ) -> Result<(ConsensusHash, BlockHeaderHash, u64), db_error> {
@@ -4571,6 +4817,7 @@ impl SortitionDB {
     }
 
     /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip.
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been known at the time of original block assembly.
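A note on the query-argument change that repeats throughout this patch: rusqlite's `params!` macro replaces the hand-written `&[&dyn ToSql]` slices, and it accepts owned values (such as the result of `u64_to_sql(...)`) as well as references, which is why the leading `&`s disappear on the converted lines. A minimal before/after sketch against an illustrative table (the table and column names are not from this codebase):

```rust
use rusqlite::{params, Connection, Result, ToSql};

fn height_old(conn: &Connection, id: u32) -> Result<u64> {
    // old style: every argument must be a reference to a ToSql trait object
    let args: &[&dyn ToSql] = &[&id];
    conn.query_row("SELECT height FROM demo WHERE id = ?1", args, |r| r.get(0))
}

fn height_new(conn: &Connection, id: u32) -> Result<u64> {
    // new style: params! builds the parameter list and takes owned values too
    conn.query_row("SELECT height FROM demo WHERE id = ?1", params![id], |r| {
        r.get(0)
    })
}
```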
pub fn get_canonical_stacks_chain_tip_hash( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { @@ -4708,7 +4955,7 @@ impl SortitionDB { conn: &Connection, ) -> Result<(u64, BurnchainHeaderHash), db_error> { let sql = "SELECT block_height, burn_header_hash FROM snapshots WHERE consensus_hash = ?1"; - let args = rusqlite::params!(&ConsensusHash::empty()); + let args = params![ConsensusHash::empty()]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; while let Some(row) = rows.next()? { @@ -4796,7 +5043,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = &[sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -4808,7 +5055,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM missed_commits WHERE intended_sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -4820,7 +5067,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = &[sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -4834,7 +5081,7 @@ impl SortitionDB { let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; - let args: &[&dyn ToSql] = &[sortition, sortition]; + let args = params![sortition, sortition]; conn.query_row(qry, args, |row| row.get(0)) .optional() .map_err(db_error::from) @@ -4916,7 +5163,7 @@ impl SortitionDB { assert!(block_height < BLOCK_HEIGHT_MAX); let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[sortition, &u64_to_sql(block_height)?, &vtxindex]; + let args = params![sortition, u64_to_sql(block_height)?, vtxindex]; query_row_panic(conn, qry, args, || { format!( "Multiple parent blocks at {},{} in {}", @@ -4945,10 +5192,10 @@ impl SortitionDB { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[ - &ancestor_snapshot.sortition_id, - &u64_to_sql(key_block_height)?, - &key_vtxindex, + let args = params![ + ancestor_snapshot.sortition_id, + u64_to_sql(key_block_height)?, + key_vtxindex, ]; query_row_panic(ic, qry, args, || { format!( @@ -4983,8 +5230,8 @@ impl SortitionDB { }; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3"; - let args: [&dyn ToSql; 3] = [&sortition_id, &block_hash, &winning_txid]; - query_row_panic(conn, qry, &args, || { + let args = params![sortition_id, block_hash, winning_txid]; + query_row_panic(conn, qry, args, || { format!("FATAL: multiple block commits for {}", &block_hash) }) } @@ -5039,9 +5286,9 @@ impl SortitionDB { ) -> Result, db_error> { let sql = "SELECT * FROM epochs WHERE start_block_height <= ?1 AND ?2 < end_block_height LIMIT 1"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(burn_block_height)?, - &u64_to_sql(burn_block_height)?, + let args = params![ + u64_to_sql(burn_block_height)?, + u64_to_sql(burn_block_height)?, ]; query_row(conn, sql, args) } @@ -5070,10 +5317,12 @@ impl SortitionDB { epoch_id: &StacksEpochId, ) -> Result, db_error> { let sql = "SELECT * 
FROM epochs WHERE epoch_id = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[&(*epoch_id as u32)]; + let args = params![*epoch_id as u32]; query_row(conn, sql, args) } + // TODO: add tests from mutation testing results #4849 + #[cfg_attr(test, mutants::skip)] /// Are microblocks disabled by Epoch 2.5 at the height specified /// in `at_burn_height`? pub fn are_microblocks_disabled(conn: &DBConn, at_burn_height: u64) -> Result { @@ -5243,6 +5492,11 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; } + if self.context.dryrun { + // don't do any inserts + return Ok(root_hash); + } + self.insert_block_snapshot(&sn, pox_payout)?; for block_op in block_ops { @@ -5300,9 +5554,9 @@ impl<'a> SortitionHandleTx<'a> { let create = "CREATE TABLE IF NOT EXISTS snapshot_burn_distributions (sortition_id TEXT PRIMARY KEY, data TEXT NOT NULL);"; self.execute(create, NO_PARAMS).unwrap(); let sql = "INSERT INTO snapshot_burn_distributions (sortition_id, data) VALUES (?, ?)"; - let args: &[&dyn ToSql] = &[ + let args = params![ new_sortition, - &serde_json::to_string(&transition.burn_dist).unwrap(), + serde_json::to_string(&transition.burn_dist).unwrap(), ]; self.execute(sql, args).unwrap(); } @@ -5321,10 +5575,10 @@ impl<'a> SortitionHandleTx<'a> { transition: &BurnchainStateTransition, ) -> Result<(), db_error> { let sql = "INSERT INTO snapshot_transition_ops (sortition_id, accepted_ops, consumed_keys) VALUES (?, ?, ?)"; - let args: &[&dyn ToSql] = &[ + let args = params![ new_sortition, - &serde_json::to_string(&transition.accepted_ops).unwrap(), - &serde_json::to_string(&transition.consumed_leader_keys).unwrap(), + serde_json::to_string(&transition.accepted_ops).unwrap(), + serde_json::to_string(&transition.consumed_leader_keys).unwrap(), ]; self.execute(sql, args)?; self.store_burn_distribution(new_sortition, transition); @@ -5350,7 +5604,9 @@ impl<'a> SortitionHandleTx<'a> { BlockstackOperationType::LeaderKeyRegister(ref op) => { info!( "ACCEPTED({}) leader key register {} at {},{}", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "consensus_hash" => %op.consensus_hash, + "burn_header_hash" => %op.burn_header_hash ); self.insert_leader_key(op, sort_id) } @@ -5358,7 +5614,8 @@ impl<'a> SortitionHandleTx<'a> { info!( "ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex; - "apparent_sender" => %op.apparent_sender + "apparent_sender" => %op.apparent_sender, + "stacks_block_hash" => %op.block_header_hash ); self.insert_block_commit(op, sort_id) } @@ -5379,7 +5636,8 @@ impl<'a> SortitionHandleTx<'a> { BlockstackOperationType::PreStx(ref op) => { info!( "ACCEPTED({}) pre stack stx op {} at {},{}", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "burn_header_hash" => %op.burn_header_hash ); // no need to store this op in the sortition db. 
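On the `#[cfg_attr(test, mutants::skip)]` annotation added above: it is a mutation-testing control, expanding to `#[mutants::skip]` in test builds (so cargo-mutants leaves the function alone, pending the tests referenced in #4849) and to nothing otherwise. A small sketch of the mechanism, assuming the `mutants` helper crate is available as a dependency:

```rust
// Sketch only: `cfg_attr` applies the inner attribute conditionally.
// Under cfg(test) this function carries #[mutants::skip]; in a normal
// build the attribute vanishes and there is no runtime cost.
#[cfg_attr(test, mutants::skip)]
pub fn disabled_at(height: u64, cutoff: u64) -> bool {
    // hypothetical stand-in for the epoch lookup in the real function
    height >= cutoff
}
```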
Ok(()) @@ -5412,14 +5670,14 @@ impl<'a> SortitionHandleTx<'a> { ) -> Result<(), db_error> { assert!(leader_key.block_height < BLOCK_HEIGHT_MAX); - let args: &[&dyn ToSql] = &[ - &leader_key.txid, - &leader_key.vtxindex, - &u64_to_sql(leader_key.block_height)?, - &leader_key.burn_header_hash, - &leader_key.consensus_hash, - &leader_key.public_key.to_hex(), - &to_hex(&leader_key.memo), + let args = params![ + leader_key.txid, + leader_key.vtxindex, + u64_to_sql(leader_key.block_height)?, + leader_key.burn_header_hash, + leader_key.consensus_hash, + leader_key.public_key.to_hex(), + to_hex(&leader_key.memo), sort_id, ]; @@ -5430,18 +5688,18 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a stack-stx op fn insert_stack_stx(&mut self, op: &StackStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.reward_addr.to_db_string(), - &op.stacked_ustx.to_string(), - &op.num_cycles, - &serde_json::to_string(&op.signer_key).unwrap(), - &serde_json::to_string(&op.max_amount).unwrap(), - &op.auth_id, + let args = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.reward_addr.to_db_string(), + op.stacked_ustx.to_string(), + op.num_cycles, + serde_json::to_string(&op.signer_key).unwrap(), + serde_json::to_string(&op.max_amount).unwrap(), + op.auth_id, ]; self.execute("REPLACE INTO stack_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, reward_addr, stacked_ustx, num_cycles, signer_key, max_amount, auth_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", args)?; @@ -5451,16 +5709,16 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a delegate-stx op fn insert_delegate_stx(&mut self, op: &DelegateStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.delegate_to.to_string(), - &serde_json::to_string(&op.reward_addr).unwrap(), - &op.delegated_ustx.to_string(), - &opt_u64_to_sql(op.until_burn_height)?, + let args = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.delegate_to.to_string(), + serde_json::to_string(&op.reward_addr).unwrap(), + op.delegated_ustx.to_string(), + opt_u64_to_sql(op.until_burn_height)?, ]; self.execute("REPLACE INTO delegate_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, delegate_to, reward_addr, delegated_ustx, until_burn_height) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", args)?; @@ -5473,17 +5731,17 @@ impl<'a> SortitionHandleTx<'a> { &mut self, op: &VoteForAggregateKeyOp, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &serde_json::to_string(&op.aggregate_key).unwrap(), - &op.round, - &u64_to_sql(op.reward_cycle)?, - &op.signer_index, - &serde_json::to_string(&op.signer_key).unwrap(), + let args = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + serde_json::to_string(&op.aggregate_key).unwrap(), + op.round, + u64_to_sql(op.reward_cycle)?, + op.signer_index, + serde_json::to_string(&op.signer_key).unwrap(), ]; self.execute("REPLACE INTO vote_for_aggregate_key (txid, vtxindex, block_height, burn_header_hash, sender_addr, aggregate_key, round, reward_cycle, signer_index, 
signer_key) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", args)?; @@ -5493,15 +5751,15 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a transfer-stx op fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.recipient.to_string(), - &op.transfered_ustx.to_string(), - &to_hex(&op.memo), + let args = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.recipient.to_string(), + op.transfered_ustx.to_string(), + to_hex(&op.memo), ]; self.execute("REPLACE INTO transfer_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, recipient_addr, transfered_ustx, memo) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?; @@ -5542,31 +5800,32 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = &[ - &block_commit.txid, - &block_commit.vtxindex, - &u64_to_sql(block_commit.block_height)?, - &block_commit.burn_header_hash, - &block_commit.block_header_hash, - &block_commit.new_seed, - &block_commit.parent_block_ptr, - &block_commit.parent_vtxindex, - &block_commit.key_block_ptr, - &block_commit.key_vtxindex, - &to_hex(&block_commit.memo[..]), - &block_commit.burn_fee.to_string(), - &tx_input_str, + let args = params![ + block_commit.txid, + block_commit.vtxindex, + u64_to_sql(block_commit.block_height)?, + block_commit.burn_header_hash, + block_commit.block_header_hash, + block_commit.new_seed, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex, + block_commit.key_block_ptr, + block_commit.key_vtxindex, + to_hex(&block_commit.memo[..]), + block_commit.burn_fee.to_string(), + tx_input_str, sort_id, - &serde_json::to_value(&block_commit.commit_outs).unwrap(), - &block_commit.sunset_burn.to_string(), - &apparent_sender_str, - &block_commit.burn_parent_modulus, + serde_json::to_value(&block_commit.commit_outs).unwrap(), + block_commit.sunset_burn.to_string(), + apparent_sender_str, + block_commit.burn_parent_modulus, + serde_json::to_string(&block_commit.treatment).unwrap(), ]; - self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18)", args)?; + self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", args)?; - let parent_args: &[&dyn ToSql] = &[sort_id, &block_commit.txid, &parent_sortition_id]; + let parent_args = params![sort_id, block_commit.txid, parent_sortition_id]; debug!( "Parent sortition of {},{},{} is {} (parent at {},{})", @@ -5594,7 +5853,7 @@ impl<'a> SortitionHandleTx<'a> { let tx_input_str = serde_json::to_string(&op.input).map_err(|e| db_error::SerializationError(e))?; - let args: &[&dyn ToSql] = &[&op.txid, &op.intended_sortition, &tx_input_str]; + let args = params![op.txid, op.intended_sortition, tx_input_str]; self.execute( "INSERT OR REPLACE INTO missed_commits (txid, 
intended_sortition_id, input) \ @@ -5646,32 +5905,32 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = &[ - &u64_to_sql(snapshot.block_height)?, - &snapshot.burn_header_hash, - &u64_to_sql(snapshot.burn_header_timestamp)?, - &snapshot.parent_burn_header_hash, - &snapshot.consensus_hash, - &snapshot.ops_hash, - &snapshot.total_burn.to_string(), - &snapshot.sortition, - &snapshot.sortition_hash, - &snapshot.winning_block_txid, - &snapshot.winning_stacks_block_hash, - &snapshot.index_root, - &u64_to_sql(snapshot.num_sortitions)?, - &snapshot.stacks_block_accepted, - &u64_to_sql(snapshot.stacks_block_height)?, - &u64_to_sql(snapshot.arrival_index)?, - &u64_to_sql(snapshot.canonical_stacks_tip_height)?, - &snapshot.canonical_stacks_tip_hash, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.sortition_id, - &snapshot.parent_sortition_id, - &snapshot.pox_valid, - &snapshot.accumulated_coinbase_ustx.to_string(), - &pox_payouts_json, - &snapshot.miner_pk_hash, + let args = params![ + u64_to_sql(snapshot.block_height)?, + snapshot.burn_header_hash, + u64_to_sql(snapshot.burn_header_timestamp)?, + snapshot.parent_burn_header_hash, + snapshot.consensus_hash, + snapshot.ops_hash, + snapshot.total_burn.to_string(), + snapshot.sortition, + snapshot.sortition_hash, + snapshot.winning_block_txid, + snapshot.winning_stacks_block_hash, + snapshot.index_root, + u64_to_sql(snapshot.num_sortitions)?, + snapshot.stacks_block_accepted, + u64_to_sql(snapshot.stacks_block_height)?, + u64_to_sql(snapshot.arrival_index)?, + u64_to_sql(snapshot.canonical_stacks_tip_height)?, + snapshot.canonical_stacks_tip_hash, + snapshot.canonical_stacks_tip_consensus_hash, + snapshot.sortition_id, + snapshot.parent_sortition_id, + snapshot.pox_valid, + snapshot.accumulated_coinbase_ustx.to_string(), + pox_payouts_json, + snapshot.miner_pk_hash, ]; self.execute("INSERT INTO snapshots \ @@ -5945,16 +6204,6 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_affirmation_map().to_string()); values.push(cur_affirmation_map.encode()); - if cfg!(test) { - // last reward cycle. - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. - keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(_reward_cycle)); - } - pox_payout_addrs } else { // if this snapshot consumed some reward set entries AND @@ -6037,32 +6286,27 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_last_selected_anchor_txid().to_string()); values.push("".to_string()); - if cfg!(test) { - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. 
- keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(0)); - } - // no payouts vec![] }; - // commit to all newly-arrived blocks - let (mut block_arrival_keys, mut block_arrival_values) = - self.process_new_block_arrivals(parent_snapshot)?; - keys.append(&mut block_arrival_keys); - values.append(&mut block_arrival_values); - // store each indexed field - let root_hash = self.put_indexed_all( - &parent_snapshot.sortition_id, - &snapshot.sortition_id, - &keys, - &values, - )?; + let root_hash = if !self.context.dryrun { + // commit to all newly-arrived blocks + let (mut block_arrival_keys, mut block_arrival_values) = + self.process_new_block_arrivals(parent_snapshot)?; + keys.append(&mut block_arrival_keys); + values.append(&mut block_arrival_values); + + self.put_indexed_all( + &parent_snapshot.sortition_id, + &snapshot.sortition_id, + &keys, + &values, + )? + } else { + TrieHash([0x00; 32]) + }; // pox payout addrs must include burn addresses let num_pox_payouts = self.get_num_pox_payouts(snapshot.block_height); @@ -6291,11 +6535,11 @@ impl<'a> SortitionHandleTx<'a> { best_bhh: BlockHeaderHash, best_height: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &best_chh, - &best_bhh, - &u64_to_sql(best_height)?, - &u64_to_sql(tip.block_height)?, + let args = params![ + best_chh, + best_bhh, + u64_to_sql(best_height)?, + u64_to_sql(tip.block_height)?, ]; debug!( @@ -6359,9 +6603,9 @@ pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; - use rusqlite::NO_PARAMS; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; + use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::vrf::*; @@ -6383,30 +6627,6 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleConn<'a> { - /// At one point in the development lifecycle, this code depended on a MARF key/value - /// pair to map the sortition tip to the last-processed reward cycle number. This data would - /// not have been present in epoch 2.4 chainstate and earlier, but would have been present in - /// epoch 2.5 and later, since at the time it was expected that all nodes would perform a - /// genesis sync when booting into epoch 2.5. However, that requirement changed at the last - /// minute, so this code was reworked to avoid the need for the MARF key. But to ensure that - /// this method is semantically consistent with the old code (which the Nakamoto chains - /// coordinator depends on), this code will test that the new reward cycle calculation matches - /// the old reward cycle calculation. - #[cfg(test)] - pub fn legacy_get_last_processed_reward_cycle(&self) -> Result { - // verify that this is semantically compatible with the older behavior, which shipped - // for epoch 2.5 but needed to be removed at the last minute in order to support a - // migration path from 2.4 chainstate to 2.5/3.0 chainstate. - let encoded_rc = self - .get_indexed(&self.context.chain_tip, &db_keys::last_reward_cycle_key())? 
- .expect("FATAL: no last-processed reward cycle"); - - let expected_rc = db_keys::last_reward_cycle_from_string(&encoded_rc); - Ok(expected_rc) - } - } - impl<'a> SortitionHandleTx<'a> { /// Update the canonical Stacks tip (testing only) pub fn test_update_canonical_stacks_tip( @@ -6503,6 +6723,7 @@ pub mod tests { path: path.to_string(), marf, readwrite, + dryrun: false, first_block_height, first_burn_header_hash: first_burn_hash.clone(), pox_constants: PoxConstants::test_default(), @@ -6586,31 +6807,31 @@ pub mod tests { let pox_payouts_json = serde_json::to_string(&pox_payout) .expect("FATAL: could not encode `total_pox_payouts` as JSON"); - let args = rusqlite::params![ - &u64_to_sql(first_snapshot.block_height)?, - &first_snapshot.burn_header_hash, - &u64_to_sql(first_snapshot.burn_header_timestamp)?, - &first_snapshot.parent_burn_header_hash, - &first_snapshot.consensus_hash, - &first_snapshot.ops_hash, - &first_snapshot.total_burn.to_string(), - &first_snapshot.sortition, - &first_snapshot.sortition_hash, - &first_snapshot.winning_block_txid, - &first_snapshot.winning_stacks_block_hash, - &first_snapshot.index_root, - &u64_to_sql(first_snapshot.num_sortitions)?, - &first_snapshot.stacks_block_accepted, - &u64_to_sql(first_snapshot.stacks_block_height)?, - &u64_to_sql(first_snapshot.arrival_index)?, - &u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, - &first_snapshot.canonical_stacks_tip_hash, - &first_snapshot.canonical_stacks_tip_consensus_hash, - &first_snapshot.sortition_id, - &first_snapshot.parent_sortition_id, - &first_snapshot.pox_valid, - &first_snapshot.accumulated_coinbase_ustx.to_string(), - &pox_payouts_json, + let args = params![ + u64_to_sql(first_snapshot.block_height)?, + first_snapshot.burn_header_hash, + u64_to_sql(first_snapshot.burn_header_timestamp)?, + first_snapshot.parent_burn_header_hash, + first_snapshot.consensus_hash, + first_snapshot.ops_hash, + first_snapshot.total_burn.to_string(), + first_snapshot.sortition, + first_snapshot.sortition_hash, + first_snapshot.winning_block_txid, + first_snapshot.winning_stacks_block_hash, + first_snapshot.index_root, + u64_to_sql(first_snapshot.num_sortitions)?, + first_snapshot.stacks_block_accepted, + u64_to_sql(first_snapshot.stacks_block_height)?, + u64_to_sql(first_snapshot.arrival_index)?, + u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, + first_snapshot.canonical_stacks_tip_hash, + first_snapshot.canonical_stacks_tip_consensus_hash, + first_snapshot.sortition_id, + first_snapshot.parent_sortition_id, + first_snapshot.pox_valid, + first_snapshot.accumulated_coinbase_ustx.to_string(), + pox_payouts_json, ]; db_tx.execute("INSERT INTO snapshots \ @@ -6644,7 +6865,7 @@ pub mod tests { height: u64, ) -> Result<(), db_error> { let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; - let args: &[&dyn ToSql] = &[ch, bhh, &u64_to_sql(height)?, &tip.sortition_id]; + let args = params![ch, bhh, u64_to_sql(height)?, tip.sortition_id]; conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 WHERE sortition_id = ?4", args) .map_err(db_error::SqliteError)?; @@ -6712,6 +6933,18 @@ pub mod tests { } Ok(ret) } + + /// Get the last block-commit from a given sender + pub fn get_last_block_commit_by_sender( + conn: &DBConn, + sender: &BurnchainSigner, + ) -> Result, db_error> { + let apparent_sender_str = + serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; + let sql = "SELECT * FROM block_commits 
WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; + let args = params![apparent_sender_str]; + query_row(conn, sql, args) + } } #[test] @@ -6982,6 +7215,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -7700,6 +7934,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -9916,6 +10151,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + treatment: vec![], }; // descends from genesis @@ -9958,6 +10194,7 @@ pub mod tests { block_height: block_height + 3, burn_parent_modulus: ((block_height + 2) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x04; 32]), + treatment: vec![], }; // descends from block_commit_1 @@ -10000,6 +10237,7 @@ pub mod tests { block_height: block_height + 4, burn_parent_modulus: ((block_height + 3) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x05; 32]), + treatment: vec![], }; // descends from genesis_block_commit @@ -10042,6 +10280,7 @@ pub mod tests { block_height: block_height + 5, burn_parent_modulus: ((block_height + 4) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x06; 32]), + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -10754,4 +10993,12 @@ pub mod tests { let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); assert_eq!(db_epochs, STACKS_EPOCHS_MAINNET.to_vec()); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + assert!(SortitionDB::is_db_version_supported_in_epoch( + StacksEpochId::latest(), + SORTITION_DB_VERSION + )); + } } diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 2a168971004..ed01ae014b5 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -31,15 +31,22 @@ use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::stacks::StacksPublicKey; -use crate::core::MINING_COMMITMENT_WINDOW; use crate::monitoring; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct BurnSamplePoint { + /// min(median_burn, most_recent_burn) pub burns: u128, + /// median burn over the UTXO chain pub median_burn: u128, + /// how many times did this miner mine in the window (i.e. how long is the UTXO chain for this + /// candidate in this window). 
+    pub frequency: u8,
+    /// distribution range start in a [0, 2**256) interval
     pub range_start: Uint256,
+    /// distribution range end in a [0, 2**256) interval
     pub range_end: Uint256,
+    /// block-commit from the miner candidate
     pub candidate: LeaderBlockCommitOp,
 }
 
@@ -94,11 +101,25 @@ impl LinkedCommitIdentifier {
 }
 
 impl BurnSamplePoint {
+    pub fn zero(candidate: LeaderBlockCommitOp) -> Self {
+        Self {
+            burns: 0,
+            median_burn: 0,
+            frequency: 0,
+            range_start: Uint256::zero(),
+            range_end: Uint256::zero(),
+            candidate,
+        }
+    }
+
     fn sanity_check_window(
+        miner_commitment_window: u8,
         block_commits: &Vec<Vec<LeaderBlockCommitOp>>,
         missed_commits: &Vec<Vec<MissedBlockCommit>>,
     ) {
-        assert!(block_commits.len() <= (MINING_COMMITMENT_WINDOW as usize));
+        assert!(
+            block_commits.len() <= usize::try_from(miner_commitment_window).expect("infallible")
+        );
         assert_eq!(missed_commits.len() + 1, block_commits.len());
         let mut block_height_at_index = None;
         for (index, commits) in block_commits.iter().enumerate() {
@@ -151,6 +172,7 @@ impl BurnSamplePoint {
     /// `OP_RETURN` payload. The length of this vector must be equal to the length of the
     /// `block_commits` vector. `burn_blocks[i]` is `true` if the `ith` block-commit must be PoB.
     pub fn make_min_median_distribution(
+        mining_commitment_window: u8,
         mut block_commits: Vec<Vec<LeaderBlockCommitOp>>,
         mut missed_commits: Vec<Vec<MissedBlockCommit>>,
         burn_blocks: Vec<bool>,
     ) -> Vec<BurnSamplePoint> {
         // sanity check
         let window_size = block_commits.len() as u8;
         assert!(window_size > 0);
-        BurnSamplePoint::sanity_check_window(&block_commits, &missed_commits);
+        BurnSamplePoint::sanity_check_window(
+            mining_commitment_window,
+            &block_commits,
+            &missed_commits,
+        );
         assert_eq!(burn_blocks.len(), block_commits.len());
 
         // first, let's link all of the current block commits to the priors
@@ -268,6 +294,17 @@ impl BurnSamplePoint {
         };
 
         let burns = cmp::min(median_burn, most_recent_burn);
+
+        let frequency = linked_commits.iter().fold(0u8, |count, commit_opt| {
+            if commit_opt.is_some() {
+                count
+                    .checked_add(1)
+                    .expect("infallible -- commit window exceeds u8::MAX")
+            } else {
+                count
+            }
+        });
+
         let candidate = if let LinkedCommitIdentifier::Valid(op) =
             linked_commits.remove(0).unwrap().op
         {
@@ -281,11 +318,13 @@ impl BurnSamplePoint {
                 "txid" => %candidate.txid.to_string(),
                 "most_recent_burn" => %most_recent_burn,
                 "median_burn" => %median_burn,
+                "frequency" => frequency,
                 "all_burns" => %format!("{:?}", all_burns));
 
             BurnSamplePoint {
                 burns,
                 median_burn,
+                frequency,
                 range_start: Uint256::zero(), // To be filled in
                 range_end: Uint256::zero(),   // To be filled in
                 candidate,
@@ -324,14 +363,6 @@ impl BurnSamplePoint {
         }
     }
 
-    #[cfg(test)]
-    pub fn make_distribution(
-        all_block_candidates: Vec<LeaderBlockCommitOp>,
-        _consumed_leader_keys: Vec<LeaderKeyRegisterOp>,
-    ) -> Vec<BurnSamplePoint> {
-        Self::make_min_median_distribution(vec![all_block_candidates], vec![], vec![true])
-    }
-
     /// Calculate the ranges between 0 and 2**256 - 1 over which each point in the burn sample
     /// applies, so we can later select which block to use.
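The new `frequency` field records how many of the window's slots actually contain a commit from the candidate, and `u8` suffices because the window length is itself a `u8`. A standalone sketch of the same fold, with `Option<u32>` standing in for the linked-commit entries:

```rust
/// Count the occupied slots in a commitment window; panics only if the
/// window itself were longer than u8::MAX, which the caller rules out.
fn frequency_of(window: &[Option<u32>]) -> u8 {
    window.iter().fold(0u8, |count, slot| {
        if slot.is_some() {
            count.checked_add(1).expect("window exceeds u8::MAX")
        } else {
            count
        }
    })
}

fn main() {
    // the miner mined in two of the three window slots
    assert_eq!(frequency_of(&[Some(1), None, Some(3)]), 2);
}
```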
fn make_sortition_ranges(burn_sample: &mut Vec) -> () { @@ -423,6 +454,21 @@ mod tests { use crate::chainstate::stacks::StacksPublicKey; use crate::core::MINING_COMMITMENT_WINDOW; + impl BurnSamplePoint { + pub fn make_distribution( + mining_commitment_window: u8, + all_block_candidates: Vec, + _consumed_leader_keys: Vec, + ) -> Vec { + Self::make_min_median_distribution( + mining_commitment_window, + vec![all_block_candidates], + vec![], + vec![true], + ) + } + } + struct BurnDistFixture { consumed_leader_keys: Vec, block_commits: Vec, @@ -466,6 +512,7 @@ mod tests { let input_txid = Txid(input_txid); LeaderBlockCommitOp { + treatment: vec![], block_header_hash: BlockHeaderHash(block_header_hash), new_seed: VRFSeed([0; 32]), parent_block_ptr: (block_id - 1) as u32, @@ -531,6 +578,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, true, true, true], @@ -564,6 +612,7 @@ mod tests { // miner 2 => min = 1, median = 3, last_burn = 3 let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, true, true, true], @@ -624,6 +673,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, false, false, false], @@ -677,6 +727,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], vec![false, false, false, false, false, false], @@ -733,6 +784,7 @@ mod tests { ]; let mut result = BurnSamplePoint::make_min_median_distribution( + MINING_COMMITMENT_WINDOW, commits.clone(), missed_commits.clone(), vec![false, false, false, false, false, false], @@ -833,6 +885,7 @@ mod tests { }; let block_commit_1 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") @@ -878,6 +931,7 @@ mod tests { }; let block_commit_2 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223") @@ -923,6 +977,7 @@ mod tests { }; let block_commit_3 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224") @@ -998,6 +1053,7 @@ mod tests { median_burn: block_commit_1.burn_fee.into(), range_start: Uint256::zero(), range_end: Uint256::max(), + frequency: 1, candidate: block_commit_1.clone(), }], }, @@ -1016,12 +1072,14 @@ mod tests { 0xffffffffffffffff, 0x7fffffffffffffff, ]), + frequency: 1, candidate: block_commit_1.clone(), }, BurnSamplePoint { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1041,6 +1099,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1054,6 +1113,7 @@ 
mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1073,6 +1133,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1086,6 +1147,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1105,6 +1167,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1118,6 +1181,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1137,6 +1201,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1150,6 +1215,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1169,6 +1235,7 @@ mod tests { burns: block_commit_1.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0xffffffffffffffff, @@ -1182,6 +1249,7 @@ mod tests { burns: block_commit_2.burn_fee.into(), median_burn: ((block_commit_1.burn_fee + block_commit_2.burn_fee) / 2) .into(), + frequency: 1, range_start: Uint256([ 0xffffffffffffffff, 0xffffffffffffffff, @@ -1208,6 +1276,7 @@ mod tests { BurnSamplePoint { burns: block_commit_1.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), + frequency: 1, range_start: Uint256::zero(), range_end: Uint256([ 0x3ed94d3cb0a84709, @@ -1220,6 +1289,7 @@ mod tests { BurnSamplePoint { burns: block_commit_2.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), + frequency: 1, range_start: Uint256([ 0x3ed94d3cb0a84709, 0x0963dded799a7c1a, @@ -1237,6 +1307,7 @@ mod tests { BurnSamplePoint { burns: (block_commit_3.burn_fee).into(), median_burn: block_commit_3.burn_fee.into(), + frequency: 1, range_start: Uint256([ 0x7db29a7961508e12, 0x12c7bbdaf334f834, @@ -1254,6 +1325,7 @@ mod tests { let f = &fixtures[i]; eprintln!("Fixture #{}", i); let dist = BurnSamplePoint::make_distribution( + MINING_COMMITMENT_WINDOW, f.block_commits.iter().cloned().collect(), f.consumed_leader_keys.iter().cloned().collect(), ); diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index b764344eb64..be92c3088f8 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -38,6 +38,7 @@ use crate::chainstate::burn::db::sortdb::SortitionHandleTx; use crate::core::SYSTEM_FORK_SET_VERSION; use crate::util_lib::db::Error as db_error; +pub mod atc; /// This module contains the code for processing the burn chain state database pub mod db; pub mod distribution; @@ -223,7 +224,7 @@ impl Opcodes { } impl 
OpsHash { - pub fn from_txids(txids: &Vec) -> OpsHash { + pub fn from_txids(txids: &[Txid]) -> OpsHash { // NOTE: unlike stacks v1, we calculate the ops hash simply // from a hash-chain of txids. There is no weird serialization // of operations, and we don't construct a merkle tree over diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 087a3e3b425..cea03d44353 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -59,6 +59,33 @@ struct ParsedData { memo: u8, } +/// This struct captures how a particular +/// PoxAddress was treated by a given block commit. +#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] +pub enum Treatment { + Reward(PoxAddress), + Punish(PoxAddress), +} + +impl Treatment { + pub fn is_reward(&self) -> bool { + matches!(self, Treatment::Reward(_)) + } + pub fn is_punish(&self) -> bool { + matches!(self, Treatment::Punish(_)) + } +} + +impl std::ops::Deref for Treatment { + type Target = PoxAddress; + + fn deref(&self) -> &Self::Target { + match self { + Treatment::Reward(ref a) | Treatment::Punish(ref a) => a, + } + } +} + pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; @@ -100,6 +127,7 @@ impl LeaderBlockCommitOp { txid: Txid([0u8; 32]), vtxindex: 0, burn_header_hash: BurnchainHeaderHash::zero(), + treatment: vec![], } } @@ -138,6 +166,7 @@ impl LeaderBlockCommitOp { - 1, burn_header_hash: BurnchainHeaderHash::zero(), + treatment: vec![], } } @@ -425,6 +454,7 @@ impl LeaderBlockCommitOp { input, apparent_sender, + treatment: Vec::new(), txid: tx.txid(), vtxindex: tx.vtxindex(), block_height: block_height, @@ -484,10 +514,11 @@ impl StacksMessageCodec for LeaderBlockCommitOp { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct RewardSetInfo { pub anchor_block: BlockHeaderHash, pub recipients: Vec<(PoxAddress, u16)>, + pub allow_nakamoto_punishment: bool, } #[derive(Debug, Clone)] @@ -518,8 +549,18 @@ impl RewardSetInfo { .get_reward_set_payouts_at(&intended_sortition) .map_err(|_e| op_error::BlockCommitBadOutputs)? .0; + let block_height = SortitionDB::get_block_snapshot(tx.tx(), intended_sortition) + .map_err(|_e| op_error::BlockCommitBadOutputs)? + .ok_or_else(|| op_error::BlockCommitBadOutputs)? + .block_height; + let allow_nakamoto_punishment = SortitionDB::get_stacks_epoch(tx.sqlite(), block_height) + .map_err(|_e| op_error::BlockCommitBadOutputs)? + .ok_or_else(|| op_error::BlockCommitBadOutputs)? + .epoch_id + .allows_pox_punishment(); Ok(tx.get_last_anchor_block_hash()?.map(|bhh| RewardSetInfo { + allow_nakamoto_punishment, anchor_block: bhh, recipients: intended_recipients .into_iter() @@ -554,16 +595,21 @@ impl RewardSetInfo { impl LeaderBlockCommitOp { /// Perform PoX checks on this block-commit, given the reward set info (which may be None if /// PoX is not active). + /// + /// If PoX was active (i.e., `reward_set_info` is `Some`), this method will return how the + /// PoX addresses were treated by the block commit. Prior to Epoch 3.0, these will be all + /// treated with rewards (attempting to punish pre-nakamoto will result in a op_error). + /// /// If `reward_set_info` is not None, then *only* the addresses in .recipients are used. The u16 /// indexes are *ignored* (and *must be* ignored, since this method gets called by /// `check_intneded_sortition()`, which does not have this information). 
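One design note on `Treatment`: because it derefs to its inner `PoxAddress`, callers can invoke address methods without matching on whether the address was rewarded or punished. A self-contained sketch of the same shape, with a simplified address type standing in for `PoxAddress`:

```rust
use std::ops::Deref;

struct Addr(String); // stand-in for PoxAddress

impl Addr {
    fn is_burn(&self) -> bool {
        self.0 == "burn"
    }
}

enum Treatment {
    Reward(Addr),
    Punish(Addr),
}

impl Deref for Treatment {
    type Target = Addr;
    fn deref(&self) -> &Addr {
        match self {
            Treatment::Reward(a) | Treatment::Punish(a) => a,
        }
    }
}

fn main() {
    let reward = Treatment::Reward(Addr("addr_a".into()));
    let punish = Treatment::Punish(Addr("burn".into()));
    // method calls auto-deref through Treatment to the inner address
    assert!(!reward.is_burn());
    assert!(punish.is_burn());
}
```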
-    fn check_pox(
+    fn check_pox<SH: SortitionHandle>(
         &self,
         epoch_id: StacksEpochId,
         burnchain: &Burnchain,
-        tx: &mut SortitionHandleTx,
+        tx: &mut SH,
         reward_set_info: Option<&RewardSetInfo>,
-    ) -> Result<(), op_error> {
+    ) -> Result<Vec<Treatment>, op_error> {
         let parent_block_height = u64::from(self.parent_block_ptr);
 
         if PoxConstants::has_pox_sunset(epoch_id) {
@@ -595,110 +641,160 @@ impl LeaderBlockCommitOp {
         //  the commit outputs must = the expected set of commit outputs.
         //  * otherwise, the commit outputs must be burn outputs.
         /////////////////////////////////////////////////////////////////////////////////////
-        if let Some(reward_set_info) = reward_set_info {
-            // we do some check-inversion here so that we check the commit_outs _before_
-            //  we check whether or not the block is descended from the anchor.
-            //  we do this because the descended_from check isn't particularly cheap, so
-            //  we want to make sure that any TX that forces us to perform the check
-            //  has either burned BTC or sent BTC to the PoX recipients
-
-            // if we're in the prepare phase, then this block-commit _must_ burn.
-            // No PoX descent check needs to be performed -- prepare-phase block commits
-            // stand alone.
-            if burnchain.is_in_prepare_phase(self.block_height) {
-                if let Err(e) = self.check_prepare_commit_burn() {
-                    warn!("Invalid block commit: in block {} which is in the prepare phase, but did not burn to a single output as expected ({:?})", self.block_height, &e);
-                    return Err(op_error::BlockCommitBadOutputs);
-                }
-            } else {
-                // Not in prepare phase, so this can be either PoB or PoX (a descent check from the
-                // anchor block will be necessary if the block-commit is well-formed).
-                //
-                // first, handle a corner case:
-                //    all of the commitment outputs are _burns_
-                //    _and_ the reward set chose two burn addresses as reward addresses.
-                // then, don't need to do a pox descendant check.
-                let recipient_set_all_burns = reward_set_info
+        let Some(reward_set_info) = reward_set_info else {
+            // no recipient info for this sortition, so expect all burns
+            if !self.all_outputs_burn() {
+                warn!("Invalid block commit: this transaction should only have burn outputs.");
+                return Err(op_error::BlockCommitBadOutputs);
+            }
+            return Ok(vec![]);
+        };
+
+        // we do some check-inversion here so that we check the commit_outs _before_
+        //  we check whether or not the block is descended from the anchor.
+        //  we do this because the descended_from check isn't particularly cheap, so
+        //  we want to make sure that any TX that forces us to perform the check
+        //  has either burned BTC or sent BTC to the PoX recipients
+
+        // if we're in the prepare phase, then this block-commit _must_ burn.
+        // No PoX descent check needs to be performed -- prepare-phase block commits
+        // stand alone.
+        if burnchain.is_in_prepare_phase(self.block_height) {
+            if let Err(e) = self.check_prepare_commit_burn() {
+                warn!("Invalid block commit: in block {} which is in the prepare phase, but did not burn to a single output as expected ({:?})", self.block_height, &e);
+                return Err(op_error::BlockCommitBadOutputs);
+            }
+            return Ok(vec![]);
+        }
+
+        // Not in prepare phase, so this can be either PoB or PoX (a descent check from the
+        // anchor block will be necessary if the block-commit is well-formed).
+        //
+        // first, handle a corner case:
+        //    all of the commitment outputs are _burns_
+        //    _and_ the reward set chose two burn addresses as reward addresses.
+        // then, don't need to do a pox descendant check.
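The rewrite above flattens the old nested `if let Some(..) { .. } else { .. }` tree into `let .. else` guards with early returns, so each disqualifying case exits immediately and the main path stays at one indentation level. The idiom in isolation:

```rust
fn greet(input: Option<&str>) -> Result<String, String> {
    // let-else: bind the value on the happy path, bail out otherwise
    let Some(name) = input else {
        return Err("no input".to_string());
    };
    Ok(format!("hello, {name}"))
}

fn main() {
    assert_eq!(greet(Some("miner")).unwrap(), "hello, miner");
    assert!(greet(None).is_err());
}
```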
+ let recipient_set_all_burns = reward_set_info + .recipients + .iter() + .fold(true, |prior_is_burn, (addr, ..)| { + prior_is_burn && addr.is_burn() + }); + + if recipient_set_all_burns { + if !self.all_outputs_burn() { + warn!("Invalid block commit: recipient set should be all burns"); + return Err(op_error::BlockCommitBadOutputs); + } + return Ok(vec![]); + } + + // Now, we are checking the reward sets match, and if they don't, + // whether or not pox descendant is necessary + + // first, if we're in a nakamoto epoch, any block commit building directly off of the anchor block + // is descendant + let directly_descended_from_anchor = epoch_id.block_commits_to_parent() + && self.block_header_hash == reward_set_info.anchor_block; + let descended_from_anchor = directly_descended_from_anchor || tx + .descended_from(parent_block_height, &reward_set_info.anchor_block) + .map_err(|e| { + error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", + parent_block_height, &reward_set_info.anchor_block, e); + op_error::BlockCommitAnchorCheck + })?; + + if self.all_outputs_burn() { + // If we're not descended from the anchor, then great, this is just a "normal" non-descendant burn commit + // But, if we are descended from the anchor and nakamoto pox punishments are allowed, this commit may have + // been a double punishment + if !descended_from_anchor { + return Ok(vec![]); + } + if reward_set_info.allow_nakamoto_punishment { + // all non-burn recipients were punished -- when we do the block processing + // enforcement check, "burn recipients" can be treated as 1 or a 0 in the + // bitvec interchangeably (whether they are punished or not doesn't matter). + let punished = reward_set_info .recipients .iter() - .fold(true, |prior_is_burn, (addr, ..)| { - prior_is_burn && addr.is_burn() - }); - - if recipient_set_all_burns { - if !self.all_outputs_burn() { - warn!("Invalid block commit: recipient set should be all burns"); - return Err(op_error::BlockCommitBadOutputs); - } - } else { - let expect_pox_descendant = if self.all_outputs_burn() { - false - } else { - let mut check_recipients: Vec<_> = reward_set_info - .recipients - .iter() - .map(|(addr, ..)| addr.clone()) - .collect(); - - if check_recipients.len() == 1 { - // If the number of recipients in the set was odd, we need to pad - // with a burn address. - // NOTE: this used the old burnchain.is_mainnet() code, which always - // returns false - check_recipients.push(PoxAddress::standard_burn_address(false)) - } + .map(|(addr, _)| Treatment::Punish(addr.clone())) + .collect(); + return Ok(punished); + } else { + warn!( + "Invalid block commit: descended from PoX anchor {}, but used burn outputs", + &reward_set_info.anchor_block + ); + return Err(op_error::BlockCommitBadOutputs); + } + } else { + let mut check_recipients: Vec<_> = reward_set_info + .recipients + .iter() + .map(|(addr, ix)| (addr.clone(), *ix)) + .collect(); - if self.commit_outs.len() != check_recipients.len() { - warn!( - "Invalid block commit: expected {} PoX transfers, but commit has {}", - reward_set_info.recipients.len(), - self.commit_outs.len() - ); - return Err(op_error::BlockCommitBadOutputs); - } + if check_recipients.len() == 1 { + // If the number of recipients in the set was odd, we need to pad + // with a burn address. 
+ // NOTE: this used the old burnchain.is_mainnet() code, which always + // returns false + check_recipients.push((PoxAddress::standard_burn_address(false), 0)) + } - // sort check_recipients and commit_outs so that we can perform an - // iterative equality check - check_recipients.sort(); - let mut commit_outs = self.commit_outs.clone(); - commit_outs.sort(); - for (expected_commit, found_commit) in - commit_outs.iter().zip(check_recipients) - { - if expected_commit.to_burnchain_repr() - != found_commit.to_burnchain_repr() - { - warn!("Invalid block commit: committed output {} does not match expected {}", - found_commit.to_burnchain_repr(), expected_commit.to_burnchain_repr()); - return Err(op_error::BlockCommitBadOutputs); - } - } - true - }; + if self.commit_outs.len() != check_recipients.len() { + warn!( + "Invalid block commit: expected {} PoX transfers, but commit has {}", + reward_set_info.recipients.len(), + self.commit_outs.len() + ); + return Err(op_error::BlockCommitBadOutputs); + } - let descended_from_anchor = tx.descended_from(parent_block_height, &reward_set_info.anchor_block) - .map_err(|e| { - error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", - parent_block_height, &reward_set_info.anchor_block, e); - op_error::BlockCommitAnchorCheck})?; - if descended_from_anchor != expect_pox_descendant { - if descended_from_anchor { - warn!("Invalid block commit: descended from PoX anchor {}, but used burn outputs", &reward_set_info.anchor_block); - } else { - warn!("Invalid block commit: not descended from PoX anchor {}, but used PoX outputs", &reward_set_info.anchor_block); - } + // we've checked length equality, so we can just iterate through + // self.commit_outs and check if each is in `check_recipients` + // *OR* if `allows_pox_punishment`, then it could be a burn. + // NOTE: we do a find and remove here so that the same recipient + // isn't found multiple times by different commit_outs. + let mut rewarded = vec![]; + for self_commit in self.commit_outs.iter() { + let search_predicate = self_commit.to_burnchain_repr(); + let found = check_recipients + .iter() + .enumerate() + .find(|(_, (check_commit, _))| { + search_predicate == check_commit.to_burnchain_repr() + }); + if let Some((index, _)) = found { + rewarded.push(Treatment::Reward(check_recipients.remove(index).0)); + } else { + // if we didn't find the pox output, then maybe its a pox punishment? 
+ if reward_set_info.allow_nakamoto_punishment && self_commit.is_burn() { + continue; + } else { + warn!("Invalid block commit: committed output {} does not match expected recipient set: {:?}", + self_commit.to_burnchain_repr(), check_recipients); return Err(op_error::BlockCommitBadOutputs); } - } + }; } - } else { - // no recipient info for this sortition, so expect all burns - if !self.all_outputs_burn() { - warn!("Invalid block commit: this transaction should only have burn outputs."); + + if !descended_from_anchor { + warn!( + "Invalid block commit: not descended from PoX anchor {}, but used PoX outputs", + &reward_set_info.anchor_block + ); return Err(op_error::BlockCommitBadOutputs); } - }; - Ok(()) + + let mut treated_outputs: Vec<_> = check_recipients + .into_iter() + .map(|x| Treatment::Punish(x.0)) + .collect(); + treated_outputs.extend(rewarded); + return Ok(treated_outputs); + } } fn check_single_burn_output(&self) -> Result<(), op_error> { @@ -962,8 +1058,9 @@ impl LeaderBlockCommitOp { Ok(()) } + /// Returns Ok() and a vector of PoxAddresses which were punished by this op pub fn check( - &self, + &mut self, burnchain: &Burnchain, tx: &mut SortitionHandleTx, reward_set_info: Option<&RewardSetInfo>, @@ -1017,7 +1114,7 @@ impl LeaderBlockCommitOp { return Err(op_error::MissedBlockCommit(missed_data)); } - if burnchain + let punished = if burnchain .pox_constants .is_after_pox_sunset_end(self.block_height, epoch.epoch_id) { @@ -1027,6 +1124,7 @@ impl LeaderBlockCommitOp { "apparent_sender" => %apparent_sender_repr); e })?; + vec![] } else { // either in epoch 2.1, or the PoX sunset hasn't completed yet self.check_pox(epoch.epoch_id, burnchain, tx, reward_set_info) @@ -1034,11 +1132,15 @@ impl LeaderBlockCommitOp { warn!("Invalid block-commit: bad PoX: {:?}", &e; "apparent_sender" => %apparent_sender_repr); e - })?; - } + })? + }; self.check_common(epoch.epoch_id, tx)?; + if reward_set_info.is_some_and(|r| r.allow_nakamoto_punishment) { + self.treatment = punished; + } + // good to go! 
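The recipient-matching loop above deliberately does a find-and-remove: each expected recipient can be consumed by at most one commit output, and whatever remains unmatched afterwards is reported as punished. The same pass in miniature, with plain strings standing in for `to_burnchain_repr()` values:

```rust
/// Split expected recipients into (rewarded, punished) given the actual
/// commit outputs; removal prevents one recipient matching twice.
fn split_rewarded(mut expected: Vec<String>, outputs: &[String]) -> (Vec<String>, Vec<String>) {
    let mut rewarded = Vec::new();
    for out in outputs {
        if let Some(ix) = expected.iter().position(|e| e == out) {
            rewarded.push(expected.remove(ix));
        }
    }
    // anything left in `expected` received no payout, i.e. was punished
    (rewarded, expected)
}

fn main() {
    let expected = vec!["addr_a".to_string(), "addr_b".to_string()];
    let outputs = vec!["addr_b".to_string(), "burn".to_string()];
    let (rewarded, punished) = split_rewarded(expected, &outputs);
    assert_eq!(rewarded, vec!["addr_b".to_string()]);
    assert_eq!(punished, vec!["addr_a".to_string()]);
}
```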
Ok(()) } @@ -1062,7 +1164,9 @@ mod tests { use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::burnchains::bitcoin::*; use crate::burnchains::*; - use crate::chainstate::burn::db::sortdb::tests::test_append_snapshot; + use crate::chainstate::burn::db::sortdb::tests::{ + test_append_snapshot, test_append_snapshot_with_winner, + }; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::db::*; use crate::chainstate::burn::operations::*; @@ -1662,7 +1766,7 @@ mod tests { block_height: block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: burn_header_hash, - }) + treatment: vec![], }) }, OpFixture { // invalid -- wrong opcode @@ -1896,6 +2000,7 @@ mod tests { commit_outs: vec![], burn_fee: 12345, + treatment: vec![], input: (Txid([0; 32]), 0), apparent_sender: BurnchainSigner::mock_parts( AddressHashMode::SerializeP2PKH, @@ -2025,11 +2130,12 @@ mod tests { prev_snapshot.index_root.clone() }; - let fixtures = vec![ + let mut fixtures = vec![ CheckFixture { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2079,6 +2185,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2129,6 +2236,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2179,6 +2287,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2241,6 +2350,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2292,7 +2402,7 @@ mod tests { }, ]; - for (ix, fixture) in fixtures.iter().enumerate() { + for (ix, fixture) in fixtures.iter_mut().enumerate() { eprintln!("Processing {}", ix); let header = BurnchainBlockHeader { block_height: fixture.op.block_height, @@ -2412,6 +2522,7 @@ mod tests { // consumes leader_key_1 let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") .unwrap(), @@ -2556,10 +2667,11 @@ mod tests { let block_height = 124; - let fixtures = vec![ + let mut fixtures = vec![ CheckFixture { // reject -- predates start block op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2611,6 +2723,7 @@ mod tests { // reject -- no such leader key op: LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2660,6 +2773,7 @@ mod tests { CheckFixture { // reject -- previous block must exist op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2710,6 +2824,7 @@ mod tests { CheckFixture { // reject -- previous block must exist in a different block 
op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2763,6 +2878,7 @@ mod tests { // here) op: LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2812,6 +2928,7 @@ mod tests { CheckFixture { // reject -- fee is 0 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2862,6 +2979,7 @@ mod tests { CheckFixture { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2912,6 +3030,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2962,6 +3081,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -3011,7 +3131,7 @@ mod tests { }, ]; - for (ix, fixture) in fixtures.iter().enumerate() { + for (ix, fixture) in fixtures.iter_mut().enumerate() { eprintln!("Processing {}", ix); let header = BurnchainBlockHeader { block_height: fixture.op.block_height, @@ -3032,6 +3152,315 @@ mod tests { } } + pub enum DescendencyStubbedSortitionHandle { + Descended, + NotDescended, + } + + impl SortitionHandle for DescendencyStubbedSortitionHandle { + fn sqlite(&self) -> &Connection { + panic!("Cannot evaluate"); + } + + fn get_block_snapshot_by_height( + &mut self, + _block_height: u64, + ) -> Result, db_error> { + panic!("Cannot evaluate"); + } + + fn first_burn_block_height(&self) -> u64 { + panic!("Cannot evaluate"); + } + + fn pox_constants(&self) -> &PoxConstants { + panic!("Cannot evaluate"); + } + + fn tip(&self) -> SortitionId { + panic!("Cannot evaluate"); + } + + fn get_nakamoto_tip( + &self, + ) -> Result, db_error> { + panic!("Cannot evaluate"); + } + + fn descended_from( + &mut self, + _block_at_burn_height: u64, + _potential_ancestor: &BlockHeaderHash, + ) -> Result { + match self { + DescendencyStubbedSortitionHandle::Descended => Ok(true), + DescendencyStubbedSortitionHandle::NotDescended => Ok(false), + } + } + } + + #[test] + fn pox_reward_punish() { + let burnchain = Burnchain { + pox_constants: pox_constants(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + initial_reward_start_block: 0, + first_block_height: 0, + first_block_timestamp: 0, + first_block_hash: BurnchainHeaderHash([0x05; 32]), + }; + + let default_block_commit = LeaderBlockCommitOp { + treatment: vec![], + sunset_burn: 0, + block_header_hash: BlockHeaderHash([0x22; 32]), + new_seed: VRFSeed([0x33; 32]), + parent_block_ptr: 125, + parent_vtxindex: 0, + key_block_ptr: 124, + key_vtxindex: 456, + memo: vec![0x80], + commit_outs: vec![], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::mock_parts( + AddressHashMode::SerializeP2PKH, + 1, + vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + ), + + txid: Txid([0xab; 32]), + vtxindex: 444, + 
block_height: 128, + burn_parent_modulus: (128 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash([0x11; 32]), + }; + + let anchor_block_hash = BlockHeaderHash([0xaa; 32]); + + fn reward_addrs(i: usize) -> PoxAddress { + let addr = StacksAddress::new(1, Hash160::from_data(&i.to_be_bytes())); + PoxAddress::Standard(addr, None) + } + let burn_addr_0 = PoxAddress::Standard(StacksAddress::burn_address(false), None); + let burn_addr_1 = PoxAddress::Standard(StacksAddress::burn_address(true), None); + let rs_pox_addrs = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(0), 0), (reward_addrs(1), 1)], + allow_nakamoto_punishment: true, + }; + let rs_pox_addrs_0b = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(0), 0), (burn_addr_0.clone(), 5)], + allow_nakamoto_punishment: true, + }; + let rs_pox_addrs_1b = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(1), 1), (burn_addr_1.clone(), 5)], + allow_nakamoto_punishment: true, + }; + + fn rev(rs: &RewardSetInfo) -> RewardSetInfo { + let mut out = rs.clone(); + out.recipients.reverse(); + out + } + + fn no_punish(rs: &RewardSetInfo) -> RewardSetInfo { + let mut out = rs.clone(); + out.allow_nakamoto_punishment = false; + out + } + + let mut test_vectors = vec![ + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + None, + Ok(vec![]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Reward(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(1), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(0)), + Treatment::Reward(reward_addrs(1)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), reward_addrs(1)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Reward(reward_addrs(1)), + Treatment::Reward(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs_1b.clone()), + // it doesn't matter if we call burn_addr_1 punished or rewarded! + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(burn_addr_1.clone()), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs_0b.clone()), + // it doesn't matter if we call burn_addr_1 punished or rewarded! 
+ Ok(vec![ + Treatment::Punish(reward_addrs(0)), + Treatment::Punish(burn_addr_0.clone()), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs_1b)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs_0b)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(1), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ]; + + for (ix, (op, reward_set_info, expected)) in test_vectors.iter_mut().enumerate() { + for should_reverse in [false, true] { + let reward_set_info = if should_reverse { + reward_set_info.as_ref().map(rev) + } else { + reward_set_info.clone() + }; + eprintln!("Processing {}", ix); + let mut ic = DescendencyStubbedSortitionHandle::Descended; + let output = op.check_pox( + StacksEpochId::Epoch30, + &burnchain, + &mut ic, + reward_set_info.as_ref(), + ); + eprintln!("{:?} <=?=> {:?}", expected, output); + match expected { + Err(e) => { + assert_eq!(format!("{e:?}"), format!("{:?}", &output.unwrap_err())); + } + Ok(expected_treatment) => { + assert!(output.is_ok()); + let actual_treatment = output.unwrap(); + assert_eq!(actual_treatment.len(), expected_treatment.len()); + for i in actual_treatment.iter() { + assert!(expected_treatment.contains(i)); + } + } + } + } + } + } + #[test] fn test_epoch_marker() { let first_block_height = 121; @@ -3122,6 +3551,7 @@ mod tests { }; let block_commit_pre_2_05 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x02; 32]), new_seed: VRFSeed([0x03; 32]), @@ -3151,6 +3581,7 @@ mod tests { }; let block_commit_post_2_05_valid = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3180,6 +3611,7 @@ mod tests { }; let block_commit_post_2_05_valid_bigger_epoch = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3209,6 +3641,7 @@ mod tests { }; let block_commit_post_2_05_invalid_bad_memo = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3238,6 +3671,7 @@ mod tests { }; let block_commit_post_2_05_invalid_no_memo = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), @@ -3267,6 +3701,7 @@ 
mod tests { }; let block_commit_post_2_1_valid = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3296,6 +3731,7 @@ mod tests { }; let block_commit_post_2_1_valid_bigger_epoch = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3325,6 +3761,7 @@ mod tests { }; let block_commit_post_2_1_invalid_bad_memo = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3354,6 +3791,7 @@ mod tests { }; let block_commit_post_2_1_invalid_no_memo = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), @@ -3384,7 +3822,7 @@ mod tests { let all_leader_key_ops = vec![leader_key]; - let all_block_commit_ops = vec![ + let mut all_block_commit_ops = vec![ (block_commit_pre_2_05, true), (block_commit_post_2_05_valid, true), (block_commit_post_2_05_valid_bigger_epoch, true), @@ -3417,12 +3855,12 @@ mod tests { eprintln!("Tip sortition is {}", &tip.sortition_id); let mut ic = SortitionHandleTx::begin(&mut db, &tip.sortition_id).unwrap(); - for (op, pass) in all_block_commit_ops.iter() { + for (op, pass) in all_block_commit_ops.iter_mut() { if op.block_height == i + 1 { match op.check(&burnchain, &mut ic, None) { Ok(_) => { assert!( - pass, + *pass, "Check succeeded when it should have failed: {:?}", &op ); @@ -3431,7 +3869,7 @@ mod tests { } Err(op_error::BlockCommitBadEpoch) => { assert!( - !pass, + !*pass, "Check failed when it should have succeeded: {:?}", &op ); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 90f7f792911..0843e03b1ec 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -27,6 +27,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFPublicKey; +use self::leader_block_commit::Treatment; use crate::burnchains::{ Address, Burnchain, BurnchainBlockHeader, BurnchainRecipient, BurnchainSigner, BurnchainTransaction, Error as BurnchainError, PublicKey, Txid, @@ -41,7 +42,6 @@ use crate::util_lib::db::{DBConn, DBTx, Error as db_error}; pub mod delegate_stx; pub mod leader_block_commit; -/// This module contains all burn-chain operations pub mod leader_key_register; pub mod stack_stx; pub mod transfer_stx; @@ -50,6 +50,8 @@ pub mod vote_for_aggregate_key; #[cfg(test)] mod test; +/// This module contains all burn-chain operations + #[derive(Debug)] pub enum Error { /// Failed to parse the operation from the burnchain transaction @@ -241,6 +243,15 @@ pub struct LeaderBlockCommitOp { /// PoX/Burn outputs pub commit_outs: Vec, + + /// If the active epoch supports PoX reward/punishment + /// via burns, this vector will contain the treatment (rewarded or punished) + /// of the PoX addresses active during the block commit. + /// + /// This value is set by the check() call, not during parsing. 
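// Editorial sketch -- not part of this diff. The serde default just below
// matters for backwards compatibility: a block commit serialized before this
// field existed carries no "treatment" key, and the default lets it still
// deserialize to an empty vector instead of failing. Schematically (the old
// JSON payload is hypothetical):
//
//     let op: LeaderBlockCommitOp = serde_json::from_str(old_json_without_treatment)?;
//     assert!(op.treatment.is_empty());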
+    #[serde(default = "default_treatment")]
+    pub treatment: Vec<Treatment>,
+
     // PoX sunset burn
     pub sunset_burn: u64,

@@ -251,6 +262,10 @@ pub struct LeaderBlockCommitOp {
     pub burn_header_hash: BurnchainHeaderHash, // hash of the burn chain block header
 }

+fn default_treatment() -> Vec<Treatment> {
+    Vec::new()
+}
+
 #[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)]
 pub struct LeaderKeyRegisterOp {
     pub consensus_hash: ConsensusHash, // consensus hash at time of issuance
@@ -269,7 +284,7 @@ pub struct DelegateStxOp {
     pub sender: StacksAddress,
     pub delegate_to: StacksAddress,
     /// a tuple representing the output index of the reward address in the BTC transaction,
-    // and the actual PoX reward address.
+    /// and the actual PoX reward address.
     /// NOTE: the address in .pox-2 will be tagged as either p2pkh or p2sh; it's impossible to tell
     /// if it's a segwit-p2sh since that looks identical to a p2sh address.
     pub reward_addr: Option<(u32, PoxAddress)>,
diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs
index 20dca3187a7..c4c54b97374 100644
--- a/stackslib/src/chainstate/burn/operations/stack_stx.rs
+++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs
@@ -181,6 +181,8 @@ impl StackStxOp {
         }
     }

+    // TODO: add tests from mutation testing results #4850
+    #[cfg_attr(test, mutants::skip)]
     fn parse_data(data: &Vec<u8>) -> Option<ParsedData> {
         /*
             Wire format:
@@ -271,6 +273,8 @@ impl StackStxOp {
         )
     }

+    // TODO: add tests from mutation testing results #4851
+    #[cfg_attr(test, mutants::skip)]
     /// parse a StackStxOp
     /// `pox_sunset_ht` is the height at which PoX *disables*
     pub fn parse_from_tx(
diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs
index 7d86f848037..b0221f14392 100644
--- a/stackslib/src/chainstate/burn/sortition.rs
+++ b/stackslib/src/chainstate/burn/sortition.rs
@@ -26,9 +26,11 @@ use stacks_common::util::log;
 use stacks_common::util::uint::{BitArray, Uint256, Uint512};

 use crate::burnchains::{
-    Address, Burnchain, BurnchainBlock, BurnchainBlockHeader, PublicKey, Txid,
+    Address, Burnchain, BurnchainBlock, BurnchainBlockHeader, BurnchainSigner,
+    BurnchainStateTransition, PublicKey, Txid,
 };
-use crate::chainstate::burn::db::sortdb::SortitionHandleTx;
+use crate::chainstate::burn::atc::{AtcRational, ATC_LOOKUP};
+use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx};
 use crate::chainstate::burn::distribution::BurnSamplePoint;
 use crate::chainstate::burn::operations::{
     BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
@@ -143,8 +145,8 @@ impl BlockSnapshot {
         for i in 0..dist.len() {
             if (dist[i].range_start <= index) && (index < dist[i].range_end) {
                 debug!(
-                    "Sampled {}: sortition index = {}",
-                    dist[i].candidate.block_header_hash, &index
+                    "Sampled {}: i = {}, sortition index = {}",
+                    dist[i].candidate.block_header_hash, i, &index
                 );
                 return Some(i);
             }
@@ -154,30 +156,21 @@ impl BlockSnapshot {
         panic!("FATAL ERROR: unable to map {} to a range", index);
     }

-    /// Select the next Stacks block header hash using cryptographic sortition.
-    /// Go through all block commits at this height, find out how any burn tokens
-    /// were spent for them, and select one at random using the relative burn amounts
-    /// to weight the sample. Use HASH(sortition_hash ++ last_VRF_seed) to pick the
-    /// winning block commit, and by extension, the next VRF seed.
-    ///
-    /// If there are no block commits outstanding, then no winner is picked.
-    ///
-    /// Note that the VRF seed is not guaranteed to be the hash of a valid VRF
-    /// proof. Miners would only build off of leader block commits for which they
-    /// (1) have the associated block data and (2) the proof in that block is valid.
-    fn select_winning_block(
+    /// Get the last winning miner's VRF seed in this block's fork.
+    /// Returns Ok(VRF seed) on success
+    /// Returns Err(..) on DB error
+    /// An initial VRF seed value will be returned if there are no prior commits.
+    fn get_last_vrf_seed(
         sort_tx: &mut SortitionHandleTx,
         block_header: &BurnchainBlockHeader,
-        sortition_hash: &SortitionHash,
-        burn_dist: &[BurnSamplePoint],
-    ) -> Result<Option<LeaderBlockCommitOp>, db_error> {
+    ) -> Result<VRFSeed, db_error> {
        let burn_block_height = block_header.block_height;

        // get the last winner's VRF seed in this block's fork
        let last_sortition_snapshot =
            sort_tx.get_last_snapshot_with_sortition(burn_block_height - 1)?;

-        let VRF_seed = if last_sortition_snapshot.is_initial() {
+        let vrf_seed = if last_sortition_snapshot.is_initial() {
            // this is the sentinel "first-sortition" block
            VRFSeed::initial()
        } else {
@@ -190,10 +183,31 @@ impl BlockSnapshot {
                .expect("FATAL ERROR: no winning block commits in database (indicates corruption)")
                .new_seed
        };
+        Ok(vrf_seed)
+    }
+
+    /// Select the next Stacks block header hash using cryptographic sortition.
+    /// Go through all block commits at this height, find out how many burn tokens
+    /// were spent for them, and select one at random using the relative burn amounts
+    /// to weight the sample. Use HASH(sortition_hash ++ last_VRF_seed) to pick the
+    /// winning block commit, and by extension, the next VRF seed.
+    ///
+    /// If there are no block commits outstanding, then no winner is picked.
+    ///
+    /// Note that the VRF seed is not guaranteed to be the hash of a valid VRF
+    /// proof. Miners would only build off of leader block commits for which they
+    /// (1) have the associated block data and (2) the proof in that block is valid.
+    fn select_winning_block(
+        sort_tx: &mut SortitionHandleTx,
+        block_header: &BurnchainBlockHeader,
+        sortition_hash: &SortitionHash,
+        burn_dist: &[BurnSamplePoint],
+    ) -> Result<Option<(LeaderBlockCommitOp, usize)>, db_error> {
+        let vrf_seed = Self::get_last_vrf_seed(sort_tx, block_header)?;

        // pick the next winner
        let win_idx_opt =
-            BlockSnapshot::sample_burn_distribution(burn_dist, &VRF_seed, sortition_hash);
+            BlockSnapshot::sample_burn_distribution(burn_dist, &vrf_seed, sortition_hash);
        match win_idx_opt {
            None => {
                // no winner
@@ -201,7 +215,7 @@ impl BlockSnapshot {
            }
            Some(win_idx) => {
                // winner!
-                Ok(Some(burn_dist[win_idx].candidate.clone()))
+                Ok(Some((burn_dist[win_idx].candidate.clone(), win_idx)))
            }
        }
    }
@@ -216,7 +230,7 @@ impl BlockSnapshot {
        first_block_height: u64,
        burn_total: u64,
        sortition_hash: &SortitionHash,
-        txids: &Vec<Txid>,
+        txids: &[Txid],
        accumulated_coinbase_ustx: u128,
    ) -> Result<BlockSnapshot, db_error> {
        let block_height = block_header.block_height;
@@ -269,6 +283,210 @@ impl BlockSnapshot {
        })
    }

+    /// Determine if we need to reject a block-commit due to miner inactivity.
+    /// Return true if the miner is sufficiently active.
+    /// Return false if not.
+    fn check_miner_is_active(
+        epoch_id: StacksEpochId,
+        sampled_window_len: usize,
+        winning_block_sender: &BurnchainSigner,
+        miner_frequency: u8,
+    ) -> bool {
+        // miner frequency only applies if the window is at least as long as the commit window
+        // sampled from the chain state (e.g. because this window can be 1 during the prepare
+        // phase)
+        let epoch_frequency_usize =
+            usize::try_from(epoch_id.mining_commitment_frequency()).expect("Infallible");
+        if usize::from(miner_frequency) < epoch_frequency_usize.min(sampled_window_len) {
+            // this miner didn't mine often enough to win anyway
+            info!("Miner did not mine often enough to win";
+                  "miner_sender" => %winning_block_sender,
+                  "miner_frequency" => miner_frequency,
+                  "minimum_frequency" => epoch_id.mining_commitment_frequency(),
+                  "window_length" => sampled_window_len);
+
+            return false;
+        }
+
+        true
+    }
+
+    /// Determine the miner's assumed total commit carryover.
+    ///
+    /// This is ATC = min(1, total-block-spend / median-windowed-total-block-spend)
+    ///
+    /// Now, this value is 1.0 in the "happy path" case where miners commit the same BTC in this
+    /// block as they had done so over the majority of the windowed burnchain blocks.
+    ///
+    /// It's also 1.0 if miners spend _more_ than this median.
+    ///
+    /// It's between 0.0 and 1.0 only if miners spend _less_ than this median. At this point, it's
+    /// possible that the "null miner" can win sortition, and the probability of that null miner
+    /// winning is a function of (1.0 - ATC).
+    ///
+    /// Returns the ATC value, and whether or not it decreased. If the ATC decreased, then we must
+    /// invoke the null miner.
+    fn get_miner_commit_carryover(
+        total_burns: Option<u64>,
+        windowed_median_burns: Option<u64>,
+    ) -> (AtcRational, bool) {
+        let Some(block_burn_total) = total_burns else {
+            // overflow
+            return (AtcRational::zero(), false);
+        };
+
+        let Some(windowed_median_burns) = windowed_median_burns else {
+            // overflow
+            return (AtcRational::zero(), false);
+        };
+
+        if windowed_median_burns == 0 {
+            // no carried commit, so null miner wins by default.
+            return (AtcRational::zero(), true);
+        }
+
+        if block_burn_total >= windowed_median_burns {
+            // clamp to 1.0, and ATC increased
+            return (AtcRational::one(), false);
+        }
+
+        (
+            AtcRational::frac(block_burn_total, windowed_median_burns),
+            true,
+        )
+    }
+
+    /// Evaluate the advantage logistic function on the given ATC value.
+    /// The ATC value will be used to index a lookup table of AtcRationals.
+    pub(crate) fn null_miner_logistic(atc: AtcRational) -> AtcRational {
+        let atc_clamp = atc.min(&AtcRational::one());
+        let index_max =
+            u64::try_from(ATC_LOOKUP.len() - 1).expect("infallible -- u64 can't hold 1023usize");
+        let index_u64 = if let Some(index_rational) = atc_clamp.mul(&AtcRational::frac(1024, 1)) {
+            // extract integer part
+            index_rational.ipart().min(index_max)
+        } else {
+            index_max
+        };
+        let index = usize::try_from(index_u64)
+            .expect("infallible -- usize can't hold u64 integers in [0, 1024)");
+        ATC_LOOKUP
+            .get(index)
+            .cloned()
+            .unwrap_or_else(|| ATC_LOOKUP.last().cloned().expect("infallible"))
+    }
+
+    /// Determine the probability that the null miner will win, given the atc shortage.
+    ///
+    /// This is NullP(atc) = (1 - atc) + atc * adv(atc).
+    ///
+    /// Where adv(x) is an "advantage function", such that the null miner is more heavily favored
+    /// to win based on how comparatively little commit carryover there is. Here, adv(x) is a
+    /// logistic function.
+    ///
+    /// In a linear setting -- i.e. the probability of the null miner winning being proportional to
+    /// the missing carryover -- the probability would simply be (1 - atc). If miners spent only
+    /// X% of the assumed total commit, then the null miner ought to win with probability (1 - X)%.
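// Editorial sketch -- a worked example of the two formulas above, using a
// made-up advantage value (the real adv(atc) comes from the ATC_LOOKUP table):
//
//     // Miners spent 600 sats of commit against a windowed median of 1000:
//     //   ATC = min(1, 600 / 1000) = 0.6, and the ATC decreased, so the
//     //   null miner is in play. Supposing adv(0.6) = 0.05:
//     //   NullP(0.6) = (1 - 0.6) + 0.6 * 0.05 = 0.43
//     // i.e. the null miner wins about 43% of the time, slightly more than
//     // the linear 40%.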
+    /// However, the null miner is advantaged more if the missing carryover is smaller. This is
+    /// captured with the extra `atc * adv(atc)` term.
+    pub(crate) fn null_miner_probability(atc: AtcRational) -> AtcRational {
+        // compute min(1.0, (1.0 - atc) + (atc * adv))
+        let adv = Self::null_miner_logistic(atc);
+        let Some(one_minus_atc) = AtcRational::one().sub(&atc) else {
+            // somehow, ATC > 1.0, then miners spent more than they did in the last sortition.
+            // So, the null miner loses.
+            warn!("ATC > 1.0 ({})", &atc.to_hex());
+            return AtcRational::zero();
+        };
+
+        let Some(atc_prod_adv) = atc.mul(&adv) else {
+            // if this is somehow too big (impossible), it would otherwise imply that the null
+            // miner advantage is overwhelming
+            warn!("ATC * ADV == INF ({} * {})", &atc.to_hex(), &adv.to_hex());
+            return AtcRational::one();
+        };
+
+        let Some(sum) = one_minus_atc.add(&atc_prod_adv) else {
+            // if this is somehow too big (impossible), it would otherwise imply that the null
+            // miner advantage is overwhelming
+            warn!(
+                "(1.0 - ATC) + (ATC * ADV) == INF ({} * {})",
+                &one_minus_atc.to_hex(),
+                &atc_prod_adv.to_hex()
+            );
+            return AtcRational::one();
+        };
+        sum.min(&AtcRational::one())
+    }
+
+    /// Determine whether or not the null miner has won sortition.
+    /// This works by creating a second burn distribution: one with the winning block-commit, and
+    /// one with the null miner. The null miner's mining power will be computed as a function of
+    /// their ATC advantage.
+    fn null_miner_wins(
+        sort_tx: &mut SortitionHandleTx,
+        block_header: &BurnchainBlockHeader,
+        sortition_hash: &SortitionHash,
+        commit_winner: &LeaderBlockCommitOp,
+        atc: AtcRational,
+    ) -> Result<bool, db_error> {
+        let vrf_seed = Self::get_last_vrf_seed(sort_tx, block_header)?;
+
+        let mut null_winner = commit_winner.clone();
+        null_winner.block_header_hash = {
+            // make the block header hash different, to render it different from the winner.
+            // Just flip the block header bits.
+            let mut bhh_bytes = null_winner.block_header_hash.0.clone();
+            for byte in bhh_bytes.iter_mut() {
+                *byte = !*byte;
+            }
+            BlockHeaderHash(bhh_bytes)
+        };
+
+        let mut null_sample_winner = BurnSamplePoint::zero(null_winner.clone());
+        let mut burn_sample_winner = BurnSamplePoint::zero(commit_winner.clone());
+
+        let null_prob = Self::null_miner_probability(atc);
+        let null_prob_u256 = null_prob.into_sortition_probability();
+
+        test_debug!(
+            "atc = {}, null_prob = {}, null_prob_u256 = {}, sortition_hash: {}",
+            atc.to_hex(),
+            null_prob.to_hex(),
+            null_prob_u256.to_hex_be(),
+            sortition_hash
+        );
+        null_sample_winner.range_start = Uint256::zero();
+        null_sample_winner.range_end = null_prob_u256;
+
+        burn_sample_winner.range_start = null_prob_u256;
+        burn_sample_winner.range_end = Uint256::max();
+
+        let burn_dist = [
+            // the only fields that matter here are:
+            // * range_start
+            // * range_end
+            // * candidate
+            null_sample_winner,
+            burn_sample_winner,
+        ];
+
+        // pick the next winner
+        let Some(win_idx) =
+            BlockSnapshot::sample_burn_distribution(&burn_dist, &vrf_seed, sortition_hash)
+        else {
+            // miner wins by default if there's no winner index
+            return Ok(false);
+        };
+
+        test_debug!("win_idx = {}", win_idx);
+
+        // null miner is index 0
+        Ok(win_idx == 0)
+    }
+
     /// Make a block snapshot from this block's data and the previous block.
     /// This process will:
     /// * calculate the new consensus hash
@@ -286,10 +504,42 @@ impl BlockSnapshot {
         my_pox_id: &PoxId,
         parent_snapshot: &BlockSnapshot,
         block_header: &BurnchainBlockHeader,
-        burn_dist: &[BurnSamplePoint],
-        txids: &Vec<Txid>,
-        block_burn_total: Option<u64>,
+        state_transition: &BurnchainStateTransition,
+        initial_mining_bonus_ustx: u128,
+    ) -> Result<BlockSnapshot, db_error> {
+        // what epoch will this snapshot be in?
+        let epoch_id = SortitionDB::get_stacks_epoch(sort_tx, parent_snapshot.block_height + 1)?
+            .unwrap_or_else(|| {
+                panic!(
+                    "FATAL: no epoch defined at burn height {}",
+                    parent_snapshot.block_height + 1
+                )
+            })
+            .epoch_id;
+
+        Self::make_snapshot_in_epoch(
+            sort_tx,
+            burnchain,
+            my_sortition_id,
+            my_pox_id,
+            parent_snapshot,
+            block_header,
+            state_transition,
+            initial_mining_bonus_ustx,
+            epoch_id,
+        )
+    }
+
+    pub fn make_snapshot_in_epoch(
+        sort_tx: &mut SortitionHandleTx,
+        burnchain: &Burnchain,
+        my_sortition_id: &SortitionId,
+        my_pox_id: &PoxId,
+        parent_snapshot: &BlockSnapshot,
+        block_header: &BurnchainBlockHeader,
+        state_transition: &BurnchainStateTransition,
         initial_mining_bonus_ustx: u128,
+        epoch_id: StacksEpochId,
     ) -> Result<BlockSnapshot, db_error> {
         assert_eq!(
             parent_snapshot.burn_header_hash,
@@ -332,12 +582,12 @@ impl BlockSnapshot {
                first_block_height,
                last_burn_total,
                &next_sortition_hash,
-                &txids,
+                &state_transition.txids(),
                accumulated_coinbase_ustx,
            )
        };

-        if burn_dist.len() == 0 {
+        if state_transition.burn_dist.len() == 0 {
            // no burns happened
            debug!(
                "No burns happened in block";
@@ -350,7 +600,7 @@ impl BlockSnapshot {

        // NOTE: this only counts burns from leader block commits and user burns that match them.
        // It ignores user burns that don't match any block.
-        let block_burn_total = match block_burn_total {
+        let block_burn_total = match state_transition.total_burns() {
            Some(total) => {
                if total == 0 {
                    // no one burned, so no sortition
@@ -384,18 +634,77 @@ impl BlockSnapshot {
        };

        // Try to pick a next block.
-        let winning_block = BlockSnapshot::select_winning_block(
+        let (winning_block, winning_block_burn_dist_index) = BlockSnapshot::select_winning_block(
            sort_tx,
            block_header,
            &next_sortition_hash,
-            burn_dist,
+            &state_transition.burn_dist,
        )?
        .expect("FATAL: there must be a winner if the burn distribution has 1 or more points");

+        // in epoch 3.x and later (Nakamoto and later), there are two additional changes:
+        // * if the winning miner didn't mine in more than k of n blocks of the window, then their chances of
+        //   winning are 0.
+        // * There exists a "null miner" that can win sortition, in which case there is no
+        //   sortition. This happens if the assumed total commit with carry-over is sufficiently low.
+        let mut reject_winner_reason = None;
+        if epoch_id >= StacksEpochId::Epoch30 {
+            if !Self::check_miner_is_active(
+                epoch_id,
+                state_transition.windowed_block_commits.len(),
+                &winning_block.apparent_sender,
+                state_transition.burn_dist[winning_block_burn_dist_index].frequency,
+            ) {
+                reject_winner_reason = Some("Miner did not mine often enough to win".to_string());
+            }
+            let (atc, null_active) = Self::get_miner_commit_carryover(
+                state_transition.total_burns(),
+                state_transition.windowed_median_burns(),
+            );
+            if null_active && reject_winner_reason.is_none() {
+                // there's a chance the null miner can win
+                if Self::null_miner_wins(
+                    sort_tx,
+                    block_header,
+                    &next_sortition_hash,
+                    &winning_block,
+                    atc,
+                )?
{ + // null wins + reject_winner_reason = Some( + "Null miner defeats block winner due to insufficient commit carryover" + .to_string(), + ); + } + } + } + + if let Some(reject_winner_reason) = reject_winner_reason { + info!("SORTITION({block_height}): WINNER REJECTED: {reject_winner_reason:?}"; + "txid" => %winning_block.txid, + "stacks_block_hash" => %winning_block.block_header_hash, + "burn_block_hash" => %winning_block.burn_header_hash); + + // N.B. can't use `make_snapshot_no_sortition()` helper here because then `sort_tx` + // would be mutably borrowed twice. + return BlockSnapshot::make_snapshot_no_sortition( + sort_tx, + my_sortition_id, + my_pox_id, + parent_snapshot, + block_header, + first_block_height, + last_burn_total, + &next_sortition_hash, + &state_transition.txids(), + accumulated_coinbase_ustx, + ); + } + // mix in the winning block's VRF seed to the sortition hash. The next block commits must // prove on this final sortition hash. let final_sortition_hash = next_sortition_hash.mix_VRF_seed(&winning_block.new_seed); - let next_ops_hash = OpsHash::from_txids(&txids); + let next_ops_hash = OpsHash::from_txids(&state_transition.txids()); let next_ch = ConsensusHash::from_parent_block_data( sort_tx, &next_ops_hash, @@ -406,10 +715,10 @@ impl BlockSnapshot { my_pox_id, )?; - debug!( - "SORTITION({}): WINNER IS {:?} (from {:?})", - block_height, &winning_block.block_header_hash, &winning_block.txid - ); + info!("SORTITION({block_height}): WINNER SELECTED"; + "txid" => %winning_block.txid, + "stacks_block_hash" => %winning_block.block_header_hash, + "burn_block_hash" => %winning_block.burn_header_hash); let miner_pk_hash = sort_tx .get_leader_key_at( @@ -461,8 +770,11 @@ mod test { use super::*; use crate::burnchains::tests::*; - use crate::burnchains::*; + use crate::burnchains::{BurnchainSigner, *}; + use crate::chainstate::burn::atc::AtcRational; + use crate::chainstate::burn::db::sortdb::tests::test_append_snapshot_with_winner; use crate::chainstate::burn::db::sortdb::*; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; use crate::chainstate::burn::operations::*; use crate::chainstate::stacks::*; @@ -473,10 +785,8 @@ mod test { my_pox_id: &PoxId, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, - burn_dist: &[BurnSamplePoint], - txids: &Vec, + burnchain_state_transition: &BurnchainStateTransition, ) -> Result { - let total_burn = BurnSamplePoint::get_total_burns(burn_dist); BlockSnapshot::make_snapshot( sort_tx, burnchain, @@ -484,9 +794,7 @@ mod test { my_pox_id, parent_snapshot, block_header, - burn_dist, - txids, - total_burn, + burnchain_state_transition, 0, ) } @@ -540,8 +848,7 @@ mod test { &pox_id, &initial_snapshot, &empty_block_header, - &vec![], - &vec![], + &BurnchainStateTransition::noop(), ) .unwrap(); sn @@ -567,6 +874,7 @@ mod test { 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, ]), + frequency: 10, candidate: LeaderBlockCommitOp::initial( &BlockHeaderHash([1u8; 32]), first_block_height + 1, @@ -594,8 +902,11 @@ mod test { &pox_id, &initial_snapshot, &empty_block_header, - &vec![empty_burn_point.clone()], - &vec![key.txid.clone()], + &BurnchainStateTransition { + burn_dist: vec![empty_burn_point.clone()], + accepted_ops: vec![BlockstackOperationType::LeaderKeyRegister(key.clone())], + ..BurnchainStateTransition::noop() + }, ) .unwrap(); sn @@ -604,4 +915,260 @@ mod test { assert!(!snapshot_no_burns.sortition); assert_eq!(snapshot_no_transactions.total_burn, 0); } + + #[test] + fn 
test_check_is_miner_active() { + assert_eq!(StacksEpochId::Epoch30.mining_commitment_frequency(), 3); + assert_eq!(StacksEpochId::Epoch25.mining_commitment_frequency(), 0); + + // reward phase + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 6 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 5 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 4 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 3 + )); + assert!(!BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 6, + &BurnchainSigner("".to_string()), + 2 + )); + + // prepare phase + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 5 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 4 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 3 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 2 + )); + assert!(BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 1 + )); + assert!(!BlockSnapshot::check_miner_is_active( + StacksEpochId::Epoch30, + 1, + &BurnchainSigner("".to_string()), + 0 + )); + } + + #[test] + fn test_get_miner_commit_carryover() { + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(None, None), + (AtcRational::zero(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(None, Some(1)), + (AtcRational::zero(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(1), None), + (AtcRational::zero(), false) + ); + + // ATC increased + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(1), Some(1)), + (AtcRational::one(), false) + ); + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(1)), + (AtcRational::one(), false) + ); + + // no carried commit + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(0)), + (AtcRational::zero(), true) + ); + + // assumed carryover + assert_eq!( + BlockSnapshot::get_miner_commit_carryover(Some(2), Some(4)), + (AtcRational::frac(2, 4), true) + ); + } + + #[test] + fn test_null_miner_logistic() { + for i in 0..1024 { + let atc_u256 = ATC_LOOKUP[i]; + let null_miner_lgst = + BlockSnapshot::null_miner_logistic(AtcRational::frac(i as u64, 1024)); + assert_eq!(null_miner_lgst, atc_u256); + } + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::zero()), + ATC_LOOKUP[0] + ); + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::one()), + *ATC_LOOKUP.last().as_ref().cloned().unwrap() + ); + assert_eq!( + BlockSnapshot::null_miner_logistic(AtcRational::frac(100, 1)), + *ATC_LOOKUP.last().as_ref().cloned().unwrap() + ); + } + + /// This test runs 100 sortitions, and in each sortition, it verifies that the null miner will + /// win for the range of ATC-C values which put the sortition index into the null miner's + /// BurnSamplePoint range. 
The ATC-C values directly influence the null miner's + /// BurnSamplePoint range, so given a fixed sortition index, we can verify that the + /// `null_miner_wins()` function returns `true` exactly when the sortition index falls into the + /// null miner's range. The ATC-C values are sampled through linear interpolation between 0.0 + /// and 1.0 in steps of 0.01. + #[test] + fn test_null_miner_wins() { + let first_burn_hash = BurnchainHeaderHash([0xfe; 32]); + let parent_first_burn_hash = BurnchainHeaderHash([0xff; 32]); + let first_block_height = 120; + + let mut prev_block_header = BurnchainBlockHeader { + block_height: first_block_height, + block_hash: first_burn_hash.clone(), + parent_block_hash: parent_first_burn_hash.clone(), + num_txs: 0, + timestamp: 12345, + }; + + let burnchain = Burnchain { + pox_constants: PoxConstants::test_default(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + first_block_timestamp: 0, + first_block_height, + initial_reward_start_block: first_block_height, + first_block_hash: first_burn_hash.clone(), + }; + + let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); + + for i in 0..100 { + let header = BurnchainBlockHeader { + block_height: prev_block_header.block_height + 1, + block_hash: BurnchainHeaderHash([i as u8; 32]), + parent_block_hash: prev_block_header.block_hash.clone(), + num_txs: 0, + timestamp: prev_block_header.timestamp + (i as u64) + 1, + }; + + let sortition_hash = SortitionHash([i as u8; 32]); + + let commit_winner = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash([i as u8; 32]), + new_seed: VRFSeed([i as u8; 32]), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 0, + key_vtxindex: 0, + memo: vec![0x80], + commit_outs: vec![], + + burn_fee: 100, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner(format!("signer {}", i)), + txid: Txid([i as u8; 32]), + vtxindex: 0, + block_height: header.block_height, + burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: header.block_hash.clone(), + treatment: vec![], + }; + + let tip = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); + test_append_snapshot_with_winner( + &mut db, + header.block_hash.clone(), + &vec![BlockstackOperationType::LeaderBlockCommit( + commit_winner.clone(), + )], + Some(tip), + Some(commit_winner.clone()), + ); + + let mut sort_tx = db.tx_begin_at_tip(); + + for j in 0..100 { + let atc = AtcRational::from_f64_unit((j as f64) / 100.0); + let null_prob = BlockSnapshot::null_miner_probability(atc); + + // NOTE: this tests .into_sortition_probability() + let null_prob_u256 = if null_prob.inner() >= AtcRational::one().inner() { + // prevent left-shift overflow + AtcRational::one_sup().into_inner() << 192 + } else { + null_prob.into_inner() << 192 + }; + + let null_wins = BlockSnapshot::null_miner_wins( + &mut sort_tx, + &header, + &sortition_hash, + &commit_winner, + atc, + ) + .unwrap(); + debug!("null_wins: {},{}: {}", i, j, null_wins); + + let vrf_seed = BlockSnapshot::get_last_vrf_seed(&mut sort_tx, &header).unwrap(); + let index = sortition_hash.mix_VRF_seed(&vrf_seed).to_uint256(); + + if index < null_prob_u256 { + assert!(null_wins); + } else { + assert!(!null_wins); + } + } + + prev_block_header = header.clone(); + } + } } diff --git 
a/stackslib/src/chainstate/coordinator/comm.rs b/stackslib/src/chainstate/coordinator/comm.rs
index 374ab72996a..cc6c2f1b3ba 100644
--- a/stackslib/src/chainstate/coordinator/comm.rs
+++ b/stackslib/src/chainstate/coordinator/comm.rs
@@ -247,8 +247,8 @@ impl CoordinatorCommunication {
        };

        let rcvrs = CoordinatorReceivers {
-            signal_bools: signal_bools,
-            signal_wakeup: signal_wakeup,
+            signal_bools,
+            signal_wakeup,
            stacks_blocks_processed,
            sortitions_processed,
            refresh_stacker_db,
diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs
index e54e9f02058..d3b6fd5f3eb 100644
--- a/stackslib/src/chainstate/coordinator/mod.rs
+++ b/stackslib/src/chainstate/coordinator/mod.rs
@@ -66,7 +66,7 @@ use crate::chainstate::stacks::events::{
    StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin,
 };
 use crate::chainstate::stacks::index::marf::MARFOpenOpts;
-use crate::chainstate::stacks::index::MarfTrieId;
+use crate::chainstate::stacks::index::{Error as IndexError, MarfTrieId};
 use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
 use crate::chainstate::stacks::{
    Error as ChainstateError, StacksBlock, StacksBlockHeader, TransactionPayload,
@@ -120,7 +120,7 @@ impl NewBurnchainBlockStatus {
    }
 }

-#[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
 pub struct RewardCycleInfo {
    pub reward_cycle: u64,
    pub anchor_status: PoxAnchorBlockStatus,
@@ -252,6 +252,7 @@ pub enum Error {
    NoSortitions,
    FailedToProcessSortition(BurnchainError),
    DBError(DBError),
+    IndexError(IndexError),
    NotPrepareEndBlock,
    NotPoXAnchorBlock,
    NotInPreparePhase,
@@ -278,6 +279,12 @@ impl From for Error {
    }
 }

+impl From<IndexError> for Error {
+    fn from(o: IndexError) -> Error {
+        Error::IndexError(o)
+    }
+}
+
 pub trait RewardSetProvider {
    fn get_reward_set(
        &self,
@@ -752,6 +759,7 @@ pub fn get_reward_cycle_info(
 ) -> Result<Option<RewardCycleInfo>, Error> {
    let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)?
.unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); + if !burnchain.is_reward_cycle_start(burn_height) { return Ok(None); } @@ -788,49 +796,54 @@ pub fn get_reward_cycle_info( ic.get_chosen_pox_anchor(burnchain_db_conn_opt, &parent_bhh, &burnchain.pox_constants) }?; - let reward_cycle_info = if let Some((consensus_hash, stacks_block_hash, txid)) = - reward_cycle_info - { - let anchor_block_known = StacksChainState::is_stacks_block_processed( - &chain_state.db(), - &consensus_hash, - &stacks_block_hash, - )?; - info!( - "PoX Anchor block selected"; - "cycle" => reward_cycle, - "consensus_hash" => %consensus_hash, - "block_hash" => %stacks_block_hash, - "block_id" => %StacksBlockId::new(&consensus_hash, &stacks_block_hash), - "is_known" => anchor_block_known, - "commit_txid" => %txid, - "cycle_burn_height" => burn_height - ); - let anchor_status = if anchor_block_known { - let block_id = StacksBlockId::new(&consensus_hash, &stacks_block_hash); - let reward_set = - provider.get_reward_set(burn_height, chain_state, burnchain, sort_db, &block_id)?; - PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set) + let reward_cycle_info = + if let Some((consensus_hash, stacks_block_hash, txid)) = reward_cycle_info { + let anchor_block_known = StacksChainState::is_stacks_block_processed( + &chain_state.db(), + &consensus_hash, + &stacks_block_hash, + )?; + let stacks_block_id = StacksBlockId::new(&consensus_hash, &stacks_block_hash); + info!( + "PoX Anchor block selected"; + "cycle" => reward_cycle, + "consensus_hash" => %consensus_hash, + "stacks_block_hash" => %stacks_block_hash, + "stacks_block_id" => %stacks_block_id, + "is_known" => anchor_block_known, + "commit_txid" => %txid, + "cycle_burn_height" => burn_height + ); + let anchor_status = if anchor_block_known { + let reward_set = provider.get_reward_set( + burn_height, + chain_state, + burnchain, + sort_db, + &stacks_block_id, + )?; + PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set) + } else { + PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) + }; + RewardCycleInfo { + reward_cycle, + anchor_status, + } } else { - PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) + info!( + "PoX anchor block NOT chosen for reward cycle {} at burn height {}", + reward_cycle, burn_height + ); + RewardCycleInfo { + reward_cycle, + anchor_status: PoxAnchorBlockStatus::NotSelected, + } }; - RewardCycleInfo { - reward_cycle, - anchor_status, - } - } else { - info!( - "PoX anchor block NOT chosen for reward cycle {} at burn height {}", - reward_cycle, burn_height - ); - RewardCycleInfo { - reward_cycle, - anchor_status: PoxAnchorBlockStatus::NotSelected, - } - }; // cache the reward cycle info as of the first sortition in the prepare phase, so that - // the Nakamoto epoch can go find it later + // the first Nakamoto epoch can go find it later. Subsequent Nakamoto epochs will use the + // reward set stored to the Nakamoto chain state. let ic = sort_db.index_handle(sortition_tip); let prev_reward_cycle = burnchain .block_height_to_reward_cycle(burn_height) @@ -845,9 +858,29 @@ pub fn get_reward_cycle_info( .expect("FATAL: no start-of-prepare-phase sortition"); let mut tx = sort_db.tx_begin()?; - if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)? 
- .is_none() - { + let preprocessed_reward_set = + SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?; + + // It's possible that we haven't processed the PoX anchor block at the time we have + // processed the burnchain block which commits to it. In this case, the PoX anchor block + // status would be SelectedAndUnknown. However, it's overwhelmingly likely (and in + // Nakamoto, _required_) that the PoX anchor block will be processed shortly thereafter. + // When this happens, we need to _update_ the sortition DB with the newly-processed reward + // set. This code performs this check to determine whether or not we need to store this + // calculated reward set. + let need_to_store = if let Some(reward_cycle_info) = preprocessed_reward_set { + // overwrite if we have an unknown anchor block + !reward_cycle_info.is_reward_info_known() + } else { + true + }; + if need_to_store { + debug!( + "Store preprocessed reward set for cycle"; + "reward_cycle" => prev_reward_cycle, + "prepare-start sortition" => %first_prepare_sn.sortition_id, + "reward_cycle_info" => format!("{:?}", &reward_cycle_info) + ); SortitionDB::store_preprocessed_reward_set( &mut tx, &first_prepare_sn.sortition_id, @@ -1603,6 +1636,7 @@ impl< /// block can be re-processed in that event. fn undo_stacks_block_orphaning( burnchain_conn: &DBConn, + burnchain_indexer: &B, ic: &SortitionDBConn, chainstate_db_tx: &mut DBTx, first_invalidate_start_block: u64, @@ -1613,8 +1647,11 @@ impl< first_invalidate_start_block, last_invalidate_start_block ); for burn_height in first_invalidate_start_block..(last_invalidate_start_block + 1) { - let burn_header = match BurnchainDB::get_burnchain_header(burnchain_conn, burn_height)? - { + let burn_header = match BurnchainDB::get_burnchain_header( + burnchain_conn, + burnchain_indexer, + burn_height, + )? { Some(hdr) => hdr, None => { continue; @@ -1840,6 +1877,7 @@ impl< // sortitions let revalidated_burn_header = BurnchainDB::get_burnchain_header( self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, first_invalidate_start_block - 1, ) .expect("FATAL: failed to read burnchain DB") @@ -1854,6 +1892,7 @@ impl< // invalidate all descendant sortitions, no matter what. 
let invalidated_burn_header = BurnchainDB::get_burnchain_header( self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, last_invalidate_start_block - 1, ) .expect("FATAL: failed to read burnchain DB") @@ -2045,6 +2084,7 @@ impl< // un-orphan blocks that had been orphaned but were tied to this now-revalidated sortition history Self::undo_stacks_block_orphaning( &self.burnchain_blocks_db.conn(), + &self.burnchain_indexer, &ic, &mut chainstate_db_tx, first_invalidate_start_block, @@ -2319,13 +2359,13 @@ impl< if self.config.require_affirmed_anchor_blocks { // missing this anchor block -- cannot proceed until we have it info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &missing_anchor_block ); return Ok(Some(missing_anchor_block)); } else { // this and descendant sortitions might already exist - info!("Burnchain block processing will continue in spite of missing affirmed anchor block {}", &missing_anchor_block); + info!("Burnchain block processing will continue in spite of missing affirmed anchor stacks block hash {}", &missing_anchor_block); } } } @@ -2407,6 +2447,8 @@ impl< return false; } + // TODO: add tests from mutation testing results #4852 + #[cfg_attr(test, mutants::skip)] /// Handle a new burnchain block, optionally rolling back the canonical PoX sortition history /// and setting it up to be replayed in the event the network affirms a different history. If /// this happens, *and* if re-processing the new affirmed history is *blocked on* the @@ -2578,7 +2620,7 @@ impl< self.check_missing_anchor_block(&header, &canonical_affirmation_map, rc_info)? { info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &missing_anchor_block ); return Ok(Some(missing_anchor_block)); @@ -2766,7 +2808,7 @@ impl< self.process_new_pox_anchor(pox_anchor, already_processed_burn_blocks)? 
{ info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &expected_anchor_block_hash ); return Ok(Some(expected_anchor_block_hash)); @@ -2922,6 +2964,9 @@ impl< "attachments_count" => attachments_instances.len(), "index_block_hash" => %block_receipt.header.index_block_hash(), "stacks_height" => block_receipt.header.stacks_block_height, + "burn_height" => block_receipt.header.burn_header_height, + "burn_block_hash" => %block_receipt.header.burn_header_hash, + "consensus_hash" => %block_receipt.header.consensus_hash, ); if let Some(atlas_db) = atlas_db { for new_attachment in attachments_instances.into_iter() { @@ -3102,12 +3147,29 @@ impl< == &AffirmationMapEntry::PoxAnchorBlockPresent { // yup, we're expecting this - debug!("Discovered an old anchor block: {} (height {}, rc {}) with heaviest affirmation map {}", pox_anchor, commit.block_height, reward_cycle, &heaviest_am); - info!("Discovered an old anchor block: {}", pox_anchor); + debug!("Discovered an old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle, + "heaviest_affirmation_map" => %heaviest_am + ); + info!("Discovered an old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle + ); return Ok(Some(pox_anchor.clone())); } else { // nope -- can ignore - debug!("Discovered unaffirmed old anchor block: {} (height {}, rc {}) with heaviest affirmation map {}", pox_anchor, commit.block_height, reward_cycle, &heaviest_am); + debug!("Discovered unaffirmed old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle, + "heaviest_affirmation_map" => %heaviest_am + ); return Ok(None); } } else { @@ -3265,11 +3327,11 @@ impl< // update cost estimator if let Some(ref mut estimator) = self.cost_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); estimator.notify_block( &block_receipt.tx_receipts, &stacks_epoch.block_limit, @@ -3279,11 +3341,11 @@ impl< // update fee estimator if let Some(ref mut estimator) = self.fee_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? 
+ .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { @@ -3386,7 +3448,10 @@ impl< info!( "Reprocessing with anchor block information, starting at block height: {}", - prep_end.block_height + prep_end.block_height; + "consensus_hash" => %prep_end.consensus_hash, + "burn_block_hash" => %prep_end.burn_header_hash, + "stacks_block_height" => prep_end.stacks_block_height ); let mut pox_id = self.sortition_db.get_pox_id(sortition_id)?; pox_id.extend_with_present_block(); @@ -3503,6 +3568,7 @@ impl SortitionDBMigrator { .pox_constants .reward_cycle_to_block_height(sort_db.first_block_height, reward_cycle) .saturating_sub(1); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?; let ancestor_sn = { diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index d5073c8f856..7bd06aaaeac 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -667,7 +667,7 @@ fn make_genesis_block_with_recipients( ) .unwrap(); - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder @@ -698,6 +698,7 @@ fn make_genesis_block_with_recipients( let commit_op = LeaderBlockCommitOp { sunset_burn: 0, + treatment: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input: (Txid([0; 32]), 0), @@ -922,7 +923,7 @@ fn make_stacks_block_with_input( let total_burn = parents_sortition.total_burn; - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( burnchain, @@ -970,6 +971,7 @@ fn make_stacks_block_with_input( let commit_op = LeaderBlockCommitOp { sunset_burn, + treatment: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input, @@ -1286,7 +1288,7 @@ fn missed_block_commits_2_05() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -1636,7 +1638,7 @@ fn missed_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -1981,7 +1983,7 @@ fn late_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2154,7 +2156,7 @@ fn test_simple_setup() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2408,6 +2410,7 @@ fn test_sortition_with_reward_set() { let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, + allow_nakamoto_punishment: false, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, @@ -2464,7 +2467,7 @@ fn test_sortition_with_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn 
.with_readonly_clarity_env( @@ -2653,6 +2656,7 @@ fn test_sortition_with_burner_reward_set() { let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, + allow_nakamoto_punishment: false, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, @@ -2709,7 +2713,7 @@ fn test_sortition_with_burner_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2916,7 +2920,7 @@ fn test_pox_btc_ops() { let mut chainstate = get_chainstate(path); let (stacker_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3001,7 +3005,7 @@ fn test_pox_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3219,7 +3223,7 @@ fn test_stx_transfer_btc_ops() { let mut chainstate = get_chainstate(path); let (sender_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3234,7 +3238,7 @@ fn test_stx_transfer_btc_ops() { let (recipient_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3348,7 +3352,7 @@ fn test_stx_transfer_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3691,13 +3695,13 @@ fn test_delegate_stx_btc_ops() { ); let first_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &first_del, ); let second_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &second_del, ); @@ -3744,7 +3748,7 @@ fn test_delegate_stx_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3988,7 +3992,7 @@ fn test_initial_coinbase_reward_distributions() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -4156,7 +4160,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4176,7 +4180,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4193,7 +4197,7 @@ fn 
test_epoch_switch_cost_contract_instantiation() { // check that costs-2 contract DNE before epoch 2.05, and that it does exist after let does_costs_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4360,7 +4364,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4381,7 +4385,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4398,7 +4402,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { // check that pox-2 contract DNE before epoch 2.1, and that it does exist after let does_pox_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4569,7 +4573,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4590,7 +4594,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4607,7 +4611,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { // check that pox-3 contract DNE before epoch 2.4, and that it does exist after let does_pox_3_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4855,7 +4859,7 @@ fn atlas_stop_start() { // check that the bns contract exists let does_bns_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| db.get_contract(&boot_code_id("bns", false))) @@ -5180,7 +5184,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_1_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, false, @@ -5214,7 +5218,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox-2.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_2_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, true, @@ -5516,7 +5520,7 @@ fn test_sortition_with_sunset() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), 
|conn| conn .with_readonly_clarity_env( @@ -5864,7 +5868,7 @@ fn test_sortition_with_sunset_and_epoch_switch() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -6774,7 +6778,7 @@ fn eval_at_chain_tip(chainstate_path: &str, sort_db: &SortitionDB, eval: &str) - let mut chainstate = get_chainstate(chainstate_path); chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_readonly_clarity_env( diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80d..15cc7f08526 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -17,8 +17,10 @@ use std::collections::VecDeque; use std::sync::{Arc, Mutex}; +use clarity::boot_util::boot_code_id; +use clarity::vm::ast::ASTRules; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::database::BurnStateDB; +use clarity::vm::database::{BurnStateDB, HeadersDB}; use clarity::vm::types::PrincipalData; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, @@ -28,7 +30,9 @@ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Burnchain, BurnchainBlockHeader}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{ + get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleConn, +}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ @@ -42,9 +46,13 @@ use crate::chainstate::coordinator::{ }; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{RewardSet, SIGNERS_NAME}; -use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; +use crate::chainstate::stacks::db::{ + StacksBlockHeaderTypes, StacksChainState, StacksDBConn, StacksHeaderInfo, +}; +use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::Error as ChainstateError; +use crate::clarity_vm::database::HeadersDBConn; use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; @@ -90,6 +98,21 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { let cycle = burnchain .block_height_to_reward_cycle(cycle_start_burn_height) .expect("FATAL: no reward cycle for burn height"); + self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) + } + + /// Read a reward_set written while updating .signers at a given cycle_id + /// `debug_log` should be set to true if the reward set loading should + /// log messages as `debug!` instead of `error!` or `info!`. This allows + /// RPC endpoints to expose this without flooding loggers. 
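+ /// A minimal illustrative call, not part of this change (`cycle`, `chainstate`, `sortdb`, and + /// `tip_block_id` are assumed to be in scope; `OnChainRewardSetProvider::new()` is the provider + /// constructor used elsewhere in this file): + /// ```ignore + /// let provider = OnChainRewardSetProvider::new(); + /// let reward_set = provider + /// .read_reward_set_nakamoto_of_cycle(cycle, &mut chainstate, &sortdb, &tip_block_id, true)?; + /// ```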
+ pub fn read_reward_set_nakamoto_of_cycle( + &self, + cycle: u64, + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + debug_log: bool, + ) -> Result<RewardSet, Error> { // figure out the block ID let Some(coinbase_height_of_calculation) = chainstate .eval_boot_code_read_only( sortdb, block_id, SIGNERS_NAME, &format!("(map-get? cycle-set-height u{})", cycle), )? @@ -115,8 +138,61 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Err(Error::PoXAnchorBlockRequired); }; + self.read_reward_set_at_calculated_block( + coinbase_height_of_calculation, + chainstate, + block_id, + debug_log, + ) + } + + pub fn get_height_of_pox_calculation( + &self, + cycle: u64, + chainstate: &mut StacksChainState, + sort_handle: &SortitionHandleConn, + block_id: &StacksBlockId, + ) -> Result<u64, Error> { + let ro_index = chainstate.state_index.reopen_readonly()?; + let headers_db = HeadersDBConn(StacksDBConn::new(&ro_index, ())); + let Some(coinbase_height_of_calculation) = chainstate + .clarity_state + .eval_read_only( + block_id, + &headers_db, + sort_handle, + &boot_code_id(SIGNERS_NAME, chainstate.mainnet), + &format!("(map-get? cycle-set-height u{})", cycle), + ASTRules::PrecheckSize, + ) + .map_err(ChainstateError::ClarityError)? + .expect_optional() + .map_err(|e| Error::ChainstateError(e.into()))? + .map(|x| { + let as_u128 = x.expect_u128()?; + Ok(u64::try_from(as_u128).expect("FATAL: block height exceeded u64")) + }) + .transpose() + .map_err(|e| Error::ChainstateError(ChainstateError::ClarityError(e)))? + else { + error!( + "The reward set was not written to .signers before it was needed by Nakamoto"; + "cycle_number" => cycle, + ); + return Err(Error::PoXAnchorBlockRequired); + }; + Ok(coinbase_height_of_calculation) + } + + pub fn read_reward_set_at_calculated_block( + &self, + coinbase_height_of_calculation: u64, + chainstate: &mut StacksChainState, + block_id: &StacksBlockId, + debug_log: bool, + ) -> Result<RewardSet, Error> { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( - &mut chainstate.index_tx_begin()?, + &mut chainstate.index_tx_begin(), block_id, coinbase_height_of_calculation, )? @@ -155,6 +231,9 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log, "PoX reward set loaded from written block state"; "reward_set_block_id" => %reward_set_block.index_block_hash(), + "burn_block_hash" => %reward_set_block.burn_header_hash, + "stacks_block_height" => reward_set_block.stacks_block_height, + "burn_header_height" => reward_set_block.burn_header_height, ); if reward_set.signers.is_none() { @@ -200,9 +279,13 @@ fn find_prepare_phase_sortitions( Ok(sns) } -/// Try to get the reward cycle information for a Nakamoto reward cycle. +/// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the +/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of +/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than +/// `burn_height`. +/// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the -/// _last_ tenure of _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of the reward cycle). /// The reason it must be this way is because its hash will be in the block-commit for the first /// prepare-phase tenure of cycle _R_ (which is required for the PoX ancestry query in the /// block-commit validation logic). 
@@ -210,19 +293,15 @@ fn find_prepare_phase_sortitions( /// If this method returns None, the caller should try again when there are more Stacks blocks. In /// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. /// -/// N.B. this method assumes that the prepare phase is comprised _solely_ of Nakamoto tenures. It -/// will not work if any of the prepare-phase tenures are from epoch 2.x. -/// /// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase -/// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has -/// already been processed. pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( burn_height: u64, sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, sort_db: &mut SortitionDB, provider: &U, ) -> Result<Option<RewardCycleInfo>, Error> { @@ -235,42 +314,169 @@ pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( "FATAL: called a nakamoto function outside of epoch 3" ); - if !burnchain.is_in_prepare_phase(burn_height) { - return Err(Error::NotInPreparePhase); - } - - // calculating the reward set for the _next_ reward cycle + // calculating the reward set for the current reward cycle let reward_cycle = burnchain - .next_reward_cycle(burn_height) + .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); - let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); debug!("Processing reward set for Nakamoto reward cycle"; + "stacks_tip" => %stacks_tip, "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); + let Some((rc_info, anchor_block_header)) = load_nakamoto_reward_set( + reward_cycle, + sortition_tip, + burnchain, + chain_state, + stacks_tip, + sort_db, + provider, + )? + else { + return Ok(None); + }; + + let block_id = match anchor_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => anchor_block_header.index_block_hash(), + StacksBlockHeaderTypes::Nakamoto(ref header) => header.block_id(), + }; + + info!( + "Anchor block selected"; + "cycle" => reward_cycle, + "block_id" => %block_id, + "consensus_hash" => %anchor_block_header.consensus_hash, + "burn_height" => anchor_block_header.burn_header_height, + "stacks_block_height" => anchor_block_header.stacks_block_height, + "burn_block_hash" => %anchor_block_header.burn_header_hash + ); + + return Ok(Some(rc_info)); +} + +/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `reward_cycle`. +/// +/// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the +/// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed +/// from epoch2 state, the reward set will be loaded from the sortition DB (which is the only place +/// it will be stored). +/// +/// Returns Ok(Some((reward set info, PoX anchor block header))) on success +/// Returns Ok(None) if the reward set is not yet known, but could be known by the time a +/// subsequent call is made. 
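+/// Illustrative usage (a sketch, not part of this change; it assumes a coordinator-style caller
+/// where `cycle`, `sort_tip`, `burnchain`, `chain_state`, `stacks_tip`, and `sort_db` are in scope):
+/// ```ignore
+/// if let Some((rc_info, anchor_header)) = load_nakamoto_reward_set(
+///     cycle, &sort_tip, &burnchain, &mut chain_state, &stacks_tip, &sort_db,
+///     &OnChainRewardSetProvider::new(),
+/// )? {
+///     // `anchor_header` is the header of the PoX anchor block chosen for `cycle`
+/// }
+/// ```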
+pub fn load_nakamoto_reward_set<U: RewardSetProvider>( + reward_cycle: u64, + sortition_tip: &SortitionId, + burnchain: &Burnchain, + chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, + sort_db: &SortitionDB, + provider: &U, +) -> Result<Option<(RewardCycleInfo, StacksHeaderInfo)>, Error> { + let prepare_end_height = burnchain + .reward_cycle_to_block_height(reward_cycle) + .saturating_sub(1); + + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined for burn height {}", + prepare_end_height + ) + }); + + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); + return Ok(None); + }; + // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; - - // did we already calculate the reward cycle info? If so, then return it. - let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { - if let Some(persisted_reward_cycle_info) = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? + let first_epoch30_reward_cycle = burnchain + .pox_reward_cycle(epoch_at_height.start_height) + .expect("FATAL: no reward cycle for epoch 3.0 start height"); + + if !epoch_at_height + .epoch_id + .uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) + { + // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. + // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward + // cycle info in the nakamoto chain state. + if let Ok(persisted_reward_cycle_info) = + sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { - return Ok(Some(persisted_reward_cycle_info)); + if persisted_reward_cycle_info + .known_selected_anchor_block() + .is_none() + { + debug!("No reward set known yet for prepare phase"; + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); + return Ok(None); + } + + // find the corresponding Stacks anchor block header + let Some((anchor_block_hash, _)) = persisted_reward_cycle_info.selected_anchor_block() + else { + // should be unreachable + error!("No anchor block known for persisted reward set"; + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); + return Ok(None); + }; + + let ic = sort_db.index_conn(); + let Some(anchor_block_snapshot) = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &prepare_end_sortition_id, + anchor_block_hash, + )? 
+ else { + // should be unreachable + error!("No ancestor block snapshot for anchor block"; + "anchor_block_hash" => %anchor_block_hash, + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); + + return Ok(None); + }; + + let Some(anchor_block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &anchor_block_snapshot.consensus_hash, + )? + else { + // should be unreachable + error!("No block header for anchor block"; + "consensus_hash" => %anchor_block_snapshot.consensus_hash, + "anchor_block_hash" => %anchor_block_hash); + return Ok(None); + }; + + debug!("Loaded reward set calculated in epoch 2.5 for reward cycle {} (which is in epoch {})", reward_cycle, epoch_at_height.epoch_id); + return Ok(Some((persisted_reward_cycle_info, anchor_block_header))); } - first_sn.sortition_id.clone() - } else { - // can't do anything + + // no reward set known yet. It's possible that it simply hasn't been processed yet. + debug!("No pre-processed PoX reward set known for pre-Nakamoto cycle {reward_cycle}"); return Ok(None); - }; + } + + // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -282,7 +488,8 @@ pub fn get_nakamoto_reward_cycle_info( } match NakamotoChainState::get_nakamoto_tenure_start_block_header( - chain_state.db(), + &mut chain_state.index_conn(), + stacks_tip, &sn.consensus_hash, ) { Ok(Some(x)) => return Some(Ok(x)), @@ -317,7 +524,7 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: no snapshot for winning PoX anchor block"); // make sure the `anchor_block` field is the same as whatever goes into the block-commit, - // or PoX ancestry queries won't work + // or PoX ancestry queries won't work. 
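+ // Note: an epoch2 anchor block's ID must be derived from its consensus hash and
+ // block hash, whereas a Nakamoto header carries its block ID directly.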
let (block_id, stacks_block_hash) = match anchor_block_header.anchored_header { StacksBlockHeaderTypes::Epoch2(ref header) => ( StacksBlockId::new(&anchor_block_header.consensus_hash, &header.block_hash()), @@ -330,19 +537,16 @@ pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( let txid = anchor_block_sn.winning_block_txid; - info!( - "Anchor block selected"; - "cycle" => reward_cycle, - "block_id" => %block_id, - "consensus_hash" => %anchor_block_header.consensus_hash, - "burn_height" => anchor_block_header.burn_header_height, - "anchor_chain_tip" => %anchor_block_header.index_block_hash(), - "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, - "first_prepare_sortition_id" => %first_sortition_id - ); + test_debug!("Stacks anchor block found"; + "block_id" => %block_id, + "block_hash" => %stacks_block_hash, + "consensus_hash" => %anchor_block_sn.consensus_hash, + "txid" => %txid, + "prepare_end_height" => %prepare_end_height, + "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - reward_start_height, + prepare_end_height, chain_state, burnchain, sort_db, )?; debug!( "Stacks anchor block (ch {}) {} cycle {} is processed", - &anchor_block_header.consensus_hash, &block_id, reward_cycle + &anchor_block_header.consensus_hash, &block_id, reward_cycle; + "anchor.consensus_hash" => %anchor_block_header.consensus_hash, + "anchor.burn_header_hash" => %anchor_block_header.burn_header_hash, + "anchor.burn_block_height" => anchor_block_header.burn_header_height ); let anchor_status = PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); @@ -358,13 +565,7 @@ pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( reward_cycle, anchor_status, }; - - // persist this - let mut tx = sort_db.tx_begin()?; - SortitionDB::store_preprocessed_reward_set(&mut tx, &first_sortition_id, &rc_info)?; - tx.commit()?; - - return Ok(Some(rc_info)); + Ok(Some((rc_info, anchor_block_header))) } /// Get the next PoX recipients in the Nakamoto epoch. @@ -372,42 +573,34 @@ pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( /// * we're guaranteed to have an anchor block /// * we pre-compute the reward set at the start of the prepare phase, so we only need to load it /// up here at the start of the reward phase. +/// `stacks_tip` is the tip that the caller is going to build a block on. pub fn get_nakamoto_next_recipients( sortition_tip: &BlockSnapshot, sort_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, burnchain: &Burnchain, ) -> Result<Option<RewardSetInfo>, Error> { - let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { - // load up new reward cycle info so we can start using *that* - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &sortition_tip.parent_sortition_id)?; - - // NOTE: this must panic because Nakamoto's first reward cycle has stackers - let first_sn = prepare_phase_sortitions - .first() - .expect("FATAL: unreachable: no prepare-phase sortitions at start of reward cycle"); - - debug!("Get pre-processed reward set"; - "sortition_id" => %first_sn.sortition_id); - - // NOTE: don't panic here. The only caller of this method is a stacks-node miner, - // and they *may* have invoked this before they've processed the prepare phase. - // That's recoverable by simply waiting to mine until they've processed those - // blocks. 
- let reward_set = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? - .ok_or_else(|| { - warn!( - "No preprocessed reward set found"; - "reward_cycle_start" => sortition_tip.block_height + 1, - "first_prepare_sortition_id" => %first_sn.sortition_id - ); - Error::PoXNotProcessedYet - })?; - Some(reward_set) - } else { - None - }; + let reward_cycle_info = + if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + burnchain + .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) + .expect("Sortition block height has no reward cycle"), + &sortition_tip.sortition_id, + burnchain, + chain_state, + stacks_tip, + sort_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(None); + }; + Some(reward_set) + } else { + None + }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -464,10 +657,32 @@ impl< .block_height_to_reward_cycle(epoch3.start_height) .expect("FATAL: epoch3 block height has no reward cycle"); - // only proceed if we have processed the _anchor block_ for this reward cycle - let handle_conn = self.sortition_db.index_handle(&canonical_sortition_tip); - let last_processed_rc = handle_conn.get_last_processed_reward_cycle()?; - Ok(last_processed_rc >= first_epoch3_reward_cycle) + // NOTE(safety): this is not guaranteed to be the canonical best Stacks tip. + // However, it's safe to use here because we're only interested in loading up the first + // Nakamoto reward set, which uses the epoch2 anchor block selection algorithm. There will + // only be one such reward set in epoch2 rules, since it's tied to a specific block-commit + // (note that this is not true for reward sets generated in Nakamoto prepare phases). + let (local_best_stacks_ch, local_best_stacks_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortition_db.conn())?; + let local_best_stacks_tip = + StacksBlockId::new(&local_best_stacks_ch, &local_best_stacks_bhh); + + // only proceed if we have processed the _anchor block_ for this reward cycle. + let Some((rc_info, _)) = load_nakamoto_reward_set( + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &local_best_stacks_tip, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(false); + }; + Ok(rc_info.reward_cycle >= first_epoch3_reward_cycle) } /// This is the main loop body for the coordinator in epoch 3. @@ -543,7 +758,7 @@ impl< match self.handle_new_nakamoto_burnchain_block() { Ok(can_proceed) => { if !can_proceed { - error!("Missing canonical anchor block",); + error!("Missing canonical anchor block"); } } Err(e) => { @@ -567,19 +782,17 @@ impl< /// with Some(pox-anchor-block-hash) until the reward cycle info is processed in the sortition /// DB. 
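+ /// A hypothetical driver loop (illustrative only; `coord` stands for this coordinator):
+ /// ```ignore
+ /// if let Some(pox_anchor) = coord.handle_new_nakamoto_stacks_block()? {
+ ///     // `pox_anchor` has not been processed yet; wait for it before handling
+ ///     // more burnchain blocks
+ /// }
+ /// ```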
pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result<Option<BlockHeaderHash>, Error> { + debug!("Handle new Nakamoto block"); let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", ); loop { // process at most one block per loop pass - let mut sortdb_handle = self - .sortition_db - .tx_handle_begin(&canonical_sortition_tip)?; - let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, - &mut sortdb_handle, + &mut self.sortition_db, + &canonical_sortition_tip, self.dispatcher, ) { Ok(receipt_opt) => receipt_opt, @@ -606,8 +819,6 @@ impl< } }; - sortdb_handle.commit()?; - let Some(block_receipt) = processed_block_receipt.take() else { // out of blocks debug!("No more blocks to process (no receipts)"); @@ -654,11 +865,11 @@ impl< // update cost estimator if let Some(ref mut estimator) = self.cost_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); estimator.notify_block( &block_receipt.tx_receipts, &stacks_epoch.block_limit, @@ -668,16 +879,18 @@ impl< // update fee estimator if let Some(ref mut estimator) = self.fee_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { warn!("FeeEstimator failed to process block receipt"; - "stacks_block" => %block_hash, - "stacks_height" => %block_receipt.header.stacks_block_height, - "error" => %e); + "stacks_block_hash" => %block_hash, + "stacks_block_height" => %block_receipt.header.stacks_block_height, + "burn_block_hash" => %block_receipt.header.burn_header_hash, + "error" => %e + ); } } @@ -707,8 +920,30 @@ impl< }); let last_processed_reward_cycle = { - let ic = self.sortition_db.index_handle(&canonical_sortition_tip); - ic.get_last_processed_reward_cycle()? + let canonical_sn = SortitionDB::get_block_snapshot( + &self.sortition_db.conn(), + &canonical_sortition_tip, + )? + .ok_or(DBError::NotFoundError)?; + + // check and see if *this block* or one of its ancestors has processed the reward + // cycle data + let Some((rc_info, _)) = load_nakamoto_reward_set( + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &canonical_stacks_block_id, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + // no anchor block yet, so try processing another block + continue; + }; + rc_info.reward_cycle }; if last_processed_reward_cycle > current_reward_cycle { @@ -716,7 +951,8 @@ impl< continue; } - // This is the first Stacks block in the prepare phase for the next reward cycle. + // This is the first Stacks block in the prepare phase for the next reward cycle, + // as determined by the history tipped at `canonical_stacks_block_id`. 
// Pause here and process the next sortitions debug!("Process next reward cycle's sortitions"); self.handle_new_nakamoto_burnchain_block()?; @@ -731,6 +967,7 @@ impl< fn get_nakamoto_reward_cycle_info( &mut self, block_height: u64, + stacks_tip: &StacksBlockId, ) -> Result<Option<RewardCycleInfo>, Error> { let sortition_tip_id = self .canonical_sortition_tip @@ -742,6 +979,7 @@ impl< sortition_tip_id, &self.burnchain, &mut self.chain_state_db, + stacks_tip, &mut self.sortition_db, &self.reward_set_provider, ) @@ -832,17 +1070,13 @@ impl< .block_height_to_reward_cycle(header.block_height) .unwrap_or(u64::MAX); - debug!( - "Process burn block {} reward cycle {} in {}", - header.block_height, reward_cycle, &self.burnchain.working_dir, - ); - info!( "Process burn block {} reward cycle {} in {}", header.block_height, reward_cycle, &self.burnchain.working_dir; "in_prepare_phase" => self.burnchain.is_in_prepare_phase(header.block_height), "is_rc_start" => self.burnchain.is_reward_cycle_start(header.block_height), "is_prior_in_prepare_phase" => self.burnchain.is_in_prepare_phase(header.block_height.saturating_sub(2)), + "burn_block_hash" => %header.block_hash, ); // calculate paid rewards during this burnchain block if we announce @@ -856,42 +1090,50 @@ impl< } }; - if self.burnchain.is_in_prepare_phase(header.block_height) { - // try to eagerly load up the reward cycle information, so we can persist it and - // make it available to signers. If we're at the _end_ of the prepare phase, then - // we have no choice but to block. - let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; - if let Some(rc_info) = reward_cycle_info { - // in nakamoto, if we have any reward cycle info at all, it will be known. - assert!( - rc_info.known_selected_anchor_block().is_some(), - "FATAL: unknown PoX anchor block in Nakamoto" - ); - } - } - let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info or we must block. - // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, - // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` - // must be the last block height in the last reward cycle. - let end_cycle_block_height = header.block_height.saturating_sub(2); - let reward_cycle_info = - self.get_nakamoto_reward_cycle_info(end_cycle_block_height)?; + // NOTE(safety): the reason it's safe to use the local best stacks tip here is + // because as long as at least 30% of the signers are honest, there's no way there + // can be two or more distinct reward sets calculated for a reward cycle. Due to + // signature malleability, there can be multiple unconfirmed siblings at a given + // height H, but at height H+1, exactly one of those siblings will be canonical, + // and will remain canonical with respect to its tenure's Bitcoin fork forever. + // Here, we're loading a reward set calculated between H and H+99 from H+100, where + // H is the start of the prepare phase. So if we get any reward set from our + // canonical tip, it's guaranteed to be the canonical one. + let canonical_sortition_tip = self.canonical_sortition_tip.clone().unwrap_or( + // should be unreachable + SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())? + .sortition_id, + ); + + let Some(local_best_nakamoto_tip) = self + .sortition_db + .index_handle(&canonical_sortition_tip) + .get_nakamoto_tip_block_id()? 
+ else { + debug!("No Nakamoto blocks processed yet, so no reward cycle known for this next reward cycle"); + return Ok(false); + }; + + let reward_cycle_info = self.get_nakamoto_reward_cycle_info( + header.block_height, + &local_best_nakamoto_tip, + )?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", end_cycle_block_height); + warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", header.block_height); return Ok(false); } } else { // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; + "local_best_nakamoto_tip" => %local_best_nakamoto_tip, "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), - "reward_cycle_end" => end_cycle_block_height - ); + "block_height" => header.block_height); return Ok(false); } reward_cycle_info diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index fffa64da3cd..569114aa124 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::collections::{HashMap, HashSet}; +use std::sync::Mutex; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; @@ -22,47 +23,69 @@ use clarity::vm::Value; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::bitvec::BitVec; use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, SIGNER_SLOTS_PER_USER, }; use stacks_common::types::chainstate::{ StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::{Address, StacksEpoch}; +use stacks_common::types::{Address, StacksEpoch, StacksEpochId}; +use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; +use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; +use crate::chainstate::nakamoto::test_stall::*; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::stacks::address::PoxAddress; +use 
crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; use crate::chainstate::stacks::boot::signers_tests::{readonly_call, readonly_call_with_sortdb}; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, - make_signers_vote_for_aggregate_public_key, make_signers_vote_for_aggregate_public_key_value, - with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, StacksTransactionSigner, TenureChangeCause, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, + CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; use crate::net::relay::Relayer; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; +use crate::net::tests::NakamotoBootPlan; +use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn get_blocks_at_height(&self, height: u64) -> Vec<NakamotoBlock> { + let sql = "SELECT data FROM nakamoto_staging_blocks WHERE height = ?1"; + let args = rusqlite::params![&u64_to_sql(height).unwrap()]; + let serialized_blocks: Vec<Vec<u8>> = query_rows(self, sql, args).unwrap(); + serialized_blocks + .into_iter() + .map(|blk_bytes| NakamotoBlock::consensus_deserialize(&mut &blk_bytes[..]).unwrap()) + .collect() + } +} + /// Bring a TestPeer into the Nakamoto Epoch fn advance_to_nakamoto( peer: &mut TestPeer, @@ -78,6 +101,8 @@ fn advance_to_nakamoto( &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); let mut tip = None; for sortition_height in 0..11 { @@ -87,21 +112,24 @@ fn advance_to_nakamoto( test_stackers .iter() .map(|test_stacker| { - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - addr.bytes.clone(), - ); + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.clone().unwrap_or(u128::MAX); + + let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, 6, &Pox4SignatureTopic::StackStx, 12_u128, - u128::MAX, + max_amount, 1, ); let signing_key = StacksPublicKey::from_private(&test_stacker.signer_private_key); + make_pox_4_lockup( &test_stacker.stacker_private_key, 0, @@ -111,22 +139,11 @@ fn advance_to_nakamoto( &signing_key, 34, Some(signature), - u128::MAX, + max_amount, 1, ) }) .collect() - } else if sortition_height == 8 { - with_sortdb(peer, |chainstate, sortdb| { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.unwrap(), - test_signers, - test_stackers, - 7, - ) - }) } else { vec![] }; @@ -136,94 +153,8 @@ fn advance_to_nakamoto( // peer is at the start of cycle 8 } -pub fn make_all_signers_vote_for_aggregate_key( - chainstate: &mut 
StacksChainState, - sortdb: &SortitionDB, - tip: &StacksBlockId, - test_signers: &mut TestSigners, - test_stackers: &[TestStacker], - cycle_id: u128, -) -> Vec<StacksTransaction> { - info!("Trigger signers vote for cycle {}", cycle_id); - - // Check if we already have an aggregate key for this cycle - if chainstate - .get_aggregate_public_key_pox_4(sortdb, tip, cycle_id as u64) - .unwrap() - .is_some() - { - debug!("Aggregate key already set for cycle {}", cycle_id); - return vec![]; - } - - // Generate a new aggregate key - test_signers.generate_aggregate_key(cycle_id as u64); - - let signers_res = readonly_call_with_sortdb( - chainstate, - sortdb, - tip, - SIGNERS_NAME.into(), - "get-signers".into(), - vec![Value::UInt(cycle_id)], - ); - - // If the signers are not set yet, then we're not ready to vote yet. - let signer_vec = match signers_res.expect_optional().unwrap() { - Some(signer_vec) => signer_vec.expect_list().unwrap(), - None => { - debug!("No signers set for cycle {}", cycle_id); - return vec![]; - } - }; - - let mut signers_to_index = HashMap::new(); - for (index, value) in signer_vec.into_iter().enumerate() { - let tuple = value.expect_tuple().unwrap(); - let signer = tuple - .get_owned("signer") - .unwrap() - .expect_principal() - .unwrap(); - let insert_res = signers_to_index.insert(signer, index); - assert!(insert_res.is_none(), "Duplicate signer in signers list"); - } - - // Build a map of the signers, their private keys, and their index - let mut signers = HashMap::new(); - for test_stacker in test_stackers { - let addr = key_to_stacks_addr(&test_stacker.signer_private_key); - let principal = PrincipalData::from(addr); - signers.insert( - addr, - ( - test_stacker.signer_private_key, - signers_to_index[&principal], - ), - ); - } - - // Vote for the aggregate key for each signer - info!("Trigger votes for cycle {}", cycle_id); - signers - .iter() - .map(|(addr, (signer_key, index))| { - let account = get_account(chainstate, sortdb, &addr); - make_signers_vote_for_aggregate_public_key_value( - signer_key, - account.nonce, - *index as u128, - Value::buff_from(test_signers.aggregate_public_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"), - 0, - cycle_id, - ) - }) - .collect() -} - /// Make a peer and transition it into the Nakamoto epoch. -/// The node needs to be stacking and it needs to vote for an aggregate key; +/// The node needs to be stacking; /// otherwise, Nakamoto can't activate. 
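+/// Typical test usage (sketch; it mirrors the calls made in the tests below):
+/// ```ignore
+/// let (mut test_signers, test_stackers) = TestStacker::common_signing_set();
+/// let mut peer = boot_nakamoto(
+///     function_name!(),
+///     vec![],
+///     &mut test_signers,
+///     &test_stackers,
+///     None,
+/// );
+/// ```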
pub fn boot_nakamoto<'a>( test_name: &str, @@ -232,7 +163,6 @@ pub fn boot_nakamoto<'a>( test_stackers: &[TestStacker], observer: Option<&'a TestEventObserver>, ) -> TestPeer<'a> { - let aggregate_public_key = test_signers.aggregate_public_key.clone(); let mut peer_config = TestPeerConfig::new(test_name, 0, 0); let private_key = peer_config.private_key.clone(); let addr = StacksAddress::from_public_keys( @@ -247,7 +177,6 @@ pub fn boot_nakamoto<'a>( // first 25 blocks are boot-up // reward cycle 6 instantiates pox-3 // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); @@ -391,6 +320,7 @@ fn replay_reward_cycle( let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); + let nakamoto_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); let mut blocks_to_process = stacks_blocks.to_vec(); blocks_to_process.shuffle(&mut thread_rng()); @@ -399,13 +329,16 @@ fn replay_reward_cycle( info!("Process Nakamoto block {} ({:?})", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( + &peer.config.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &nakamoto_tip, + &block, None, + NakamotoBlockObtainMethod::Pushed, ) - .unwrap(); + .unwrap_or(false); if accepted { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -425,8 +358,7 @@ fn replay_reward_cycle( /// Mine a single Nakamoto tenure with a single Nakamoto block #[test] fn test_simple_nakamoto_coordinator_bootup() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -490,8 +422,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -521,7 +452,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { tenure_change_tx, coinbase_tx, &mut test_signers, - |miner, chainstate, sortdb, blocks_so_far| { + |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 10 { debug!("\n\nProduce block {}\n\n", blocks_so_far.len()); @@ -596,12 +527,437 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } +impl<'a> TestPeer<'a> { + pub fn mine_single_block_tenure<F, G>( + &mut self, + sender_key: &StacksPrivateKey, + tenure_change_tx: &StacksTransaction, + coinbase_tx: &StacksTransaction, + miner_setup: F, + after_block: G, + ) -> NakamotoBlock + where + F: FnMut(&mut NakamotoBlockBuilder), + G: FnMut(&mut NakamotoBlock) -> bool, + { + let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_key)); + let mut test_signers = self.config.test_signers.clone().unwrap(); + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + // do a stx transfer in each block to a given recipient + let mut blocks_and_sizes = self.make_nakamoto_tenure_and( + tenure_change_tx.clone(), 
coinbase_tx.clone(), + &mut test_signers, + miner_setup, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + let account = get_account(chainstate, sortdb, &sender_addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &sender_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + after_block, + ); + assert_eq!(blocks_and_sizes.len(), 1); + let block = blocks_and_sizes.pop().unwrap().0; + block + } + + pub fn single_block_tenure<S, F, G>( + &mut self, + sender_key: &StacksPrivateKey, + miner_setup: S, + mut after_burn_ops: F, + after_block: G, + ) -> (NakamotoBlock, u64, StacksTransaction, StacksTransaction) + where + S: FnMut(&mut NakamotoBlockBuilder), + F: FnMut(&mut Vec<BlockstackOperationType>), + G: FnMut(&mut NakamotoBlock) -> bool, + { + let (mut burn_ops, mut tenure_change, miner_key) = + self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + after_burn_ops(&mut burn_ops); + let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops.clone()); + let pox_constants = self.sortdb().pox_constants.clone(); + let first_burn_height = self.sortdb().first_block_height; + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = self + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); + + let block = self.mine_single_block_tenure( + sender_key, + &tenure_change_tx, + &coinbase_tx, + miner_setup, + after_block, + ); + + (block, burn_height, tenure_change_tx, coinbase_tx) + } +} + +#[test] +// Test the block commit descendant check in nakamoto: +// - mine until the start of the prepare phase +// - mine the tenure that should be chosen as the PoX anchor block +// - verify that the first tenure of the next reward cycle builds directly on the anchor block +fn block_descendant() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + max_amount: Some(u64::MAX as u128), + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + }) + .collect::<Vec<_>>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = 
NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let mut blocks = vec![]; + let pox_constants = peer.sortdb().pox_constants.clone(); + let first_burn_height = peer.sortdb().first_block_height; + + // mine until we're at the start of the prepare phase (so we *know* + // that the reward set contains entries) + loop { + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + blocks.push(block); + + if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) { + info!("At prepare phase start"; "burn_height" => burn_height); + break; + } + } + + // mine until right before the end of the prepare phase + loop { + let (burn_height, ..) = peer.mine_empty_tenure(); + if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 3) { + info!("At prepare phase end"; "burn_height" => burn_height); + break; + } + } + + // this should get chosen as the anchor block. + let (naka_anchor_block, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + + // make the index=0 block empty, because it doesn't get a descendancy check + // so, if this has a tenure mined, the direct parent check won't occur + peer.mine_empty_tenure(); + + // this would be where things go haywire. This tenure's parent will be the anchor block. + let (first_reward_block, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + + assert_eq!( + first_reward_block.header.parent_block_id, + naka_anchor_block.block_id() + ); +} + +#[test] +// Test PoX Reward and Punish treatment in nakamoto +// - create a 12 address PoX reward set +// - make a normal block commit, assert that the bitvec must contain 1s for those addresses +// - make a burn block commit, assert that the bitvec must contain 0s for those addresses +fn pox_treatment() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + max_amount: None, + }) + .collect::<Vec<_>>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let mut blocks = vec![]; + 
let pox_constants = peer.sortdb().pox_constants.clone(); + let first_burn_height = peer.sortdb().first_block_height; + + // mine until we're at the start of the next reward phase (so we *know* + // that the reward set contains entries) + loop { + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + blocks.push(block); + + if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { + break; + } + } + + let mut expected_reward_set = vec![]; + for stacker in test_stackers.iter() { + let pox_addr = stacker.pox_addr.as_ref().unwrap(); + (0..3).for_each(|_| expected_reward_set.push(pox_addr.clone())); + } + expected_reward_set.sort_by_key(|addr| addr.to_burnchain_repr()); + expected_reward_set.reverse(); + let pox_recipients = Mutex::new(vec![]); + info!("Starting the test... beginning with a reward commit"); + // The next block should be the start of a reward phase, so the PoX recipient should + // be chosen. + // + // First: perform a normal block commit, and then try to mine a block with all zeros in the + // bitvector. + let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure( + &private_key, + |_| {}, + |burn_ops| { + burn_ops.iter().for_each(|op| { + if let BlockstackOperationType::LeaderBlockCommit(ref commit) = op { + *pox_recipients.lock().unwrap() = commit.commit_outs.clone(); + } + }); + }, + |block| { + let pox_recipients = pox_recipients.lock().unwrap(); + assert_eq!(pox_recipients.len(), 2); + info!( + "Expected reward set: {:?}", + expected_reward_set + .iter() + .map(|x| x.to_burnchain_repr()) + .collect::<Vec<_>>() + ); + let target_indexes = pox_recipients.iter().map(|pox_addr| { + expected_reward_set + .iter() + .enumerate() + .find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) + .unwrap() + }); + let mut bitvec = BitVec::ones(12).unwrap(); + target_indexes.for_each(|ix| { + let ix: u16 = ix.try_into().unwrap(); + bitvec.set(ix, false).unwrap(); + bitvec.set(1 + ix, false).unwrap(); + bitvec.set(2 + ix, false).unwrap(); + }); + block.header.pox_treatment = bitvec; + // don't try to process this block yet, just return it so that + // we can assert the block error. + false + }, + ); + let processing_result = peer.try_process_block(&invalid_block).unwrap_err(); + assert_eq!( + processing_result.to_string(), + "Bitvec does not match the block commit's PoX handling".to_string(), + ); + assert!(matches!( + processing_result, + ChainstateError::InvalidStacksBlock(_), + )); + + // set the bitvec to a heterogeneous one: either punish or + // reward is acceptable, so this block should just process. + let block = peer.mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, + |_| {}, + |block| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + block.header.pox_treatment = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, true, + ] + .as_slice(), + ) + .unwrap(); + true + }, + ); + blocks.push(block); + + // now we need to test punishment! + info!("Testing a punish commit"); + let pox_recipients = Mutex::new(vec![]); + let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure( + &private_key, + |miner| { + // we want the miner to finish assembling the block, and then we'll + // alter the bitvec before it signs the block (in a subsequent closure). + // this way, we can test the block processing behavior. 
+            miner.header.pox_treatment = BitVec::try_from(
+                [
+                    false, false, true, false, false, true, false, false, true, false, false, true,
+                ]
+                .as_slice(),
+            )
+            .unwrap();
+        },
+        |burn_ops| {
+            burn_ops.iter_mut().for_each(|op| {
+                if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op {
+                    *pox_recipients.lock().unwrap() = vec![commit.commit_outs[0].clone()];
+                    commit.commit_outs[0] = PoxAddress::standard_burn_address(false);
+                }
+            });
+        },
+        |block| {
+            let pox_recipients = pox_recipients.lock().unwrap();
+            assert_eq!(pox_recipients.len(), 1);
+            info!(
+                "Expected reward set: {:?}",
+                expected_reward_set
+                    .iter()
+                    .map(|x| x.to_burnchain_repr())
+                    .collect::<Vec<_>>()
+            );
+            let target_indexes = pox_recipients.iter().map(|pox_addr| {
+                expected_reward_set
+                    .iter()
+                    .enumerate()
+                    .find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None })
+                    .unwrap()
+            });
+            let mut bitvec = BitVec::zeros(12).unwrap();
+            target_indexes.for_each(|ix| {
+                let ix: u16 = ix.try_into().unwrap();
+                bitvec.set(ix, true).unwrap();
+                bitvec.set(1 + ix, true).unwrap();
+                bitvec.set(2 + ix, true).unwrap();
+            });
+
+            block.header.pox_treatment = bitvec;
+            // don't try to process this block yet, just return it so that
+            // we can assert the block error.
+            false
+        },
+    );
+    let processing_result = peer.try_process_block(&invalid_block).unwrap_err();
+    assert_eq!(
+        processing_result.to_string(),
+        "Bitvec does not match the block commit's PoX handling".to_string(),
+    );
+    assert!(matches!(
+        processing_result,
+        ChainstateError::InvalidStacksBlock(_),
+    ));
+
+    // set the bitvec to a heterogeneous one: either punish or
+    // reward is acceptable, so this block should just process.
+    let block = peer.mine_single_block_tenure(
+        &private_key,
+        &tenure_change_tx,
+        &coinbase_tx,
+        |miner| {
+            // each stacker has 3 entries in the bitvec.
+            // entries are ordered by PoxAddr, so this makes every entry a 1-of-3
+            miner.header.pox_treatment = BitVec::try_from(
+                [
+                    false, false, true, false, false, true, false, false, true, false, false, true,
+                ]
+                .as_slice(),
+            )
+            .unwrap();
+        },
+        |_block| true,
+    );
+    blocks.push(block);
+
+    let tip = {
+        let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate;
+        let sort_db = peer.sortdb.as_mut().unwrap();
+        NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db)
+            .unwrap()
+            .unwrap()
+    };
+
+    assert_eq!(
+        tip.anchored_header.as_stacks_nakamoto().unwrap(),
+        &blocks.last().unwrap().header
+    );
+}
+
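An aside on the slot arithmetic both `pox_treatment` scenarios above rely on: each of the 4 stackers owns 3 consecutive entries in the 12-slot reward set, so a single recipient's treatment is toggled by writing three adjacent bits. A minimal, std-only sketch of that pattern, with `Vec<bool>` standing in for `BitVec<4000>` and all names hypothetical:

```rust
// Hypothetical helper mirroring the tests' bitvec.set(ix), set(1 + ix), set(2 + ix) pattern.
fn set_stacker_treatment(bitvec: &mut [bool], first_slot: usize, rewarded: bool) {
    for offset in 0..3 {
        bitvec[first_slot + offset] = rewarded;
    }
}

fn main() {
    let mut treatment = vec![true; 12]; // analogous to BitVec::ones(12)
    set_stacker_treatment(&mut treatment, 0, false); // punish the first stacker
    assert_eq!(&treatment[0..3], &[false; 3][..]);
    assert!(treatment[3..].iter().all(|&b| b));
}
```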
 /// Test chainstate getters against an instantiated epoch2/Nakamoto chain.
 /// There are 11 epoch2 blocks and 2 nakamoto tenures with 10 nakamoto blocks each
 /// Tests:
 /// * get_header_by_coinbase_height
-/// * get_parent_vrf_proof
-/// * get_highest_nakamoto_tenure
+/// * get_ongoing_tenure
 /// * check_first_nakamoto_tenure
 /// * check_valid_consensus_hash
 /// * check_nakamoto_tenure
@@ -616,8 +972,7 @@ fn test_nakamoto_chainstate_getters() {
         &vec![StacksPublicKey::from_private(&private_key)],
     )
     .unwrap();
-    let mut test_signers = TestSigners::default();
-    let test_stackers = TestStacker::common_signing_set(&test_signers);
+    let (mut test_signers, test_stackers) = TestStacker::common_signing_set();
     let mut peer = boot_nakamoto(
         function_name!(),
         vec![(addr.into(), 100_000_000)],
@@ -634,18 +989,19 @@ fn test_nakamoto_chainstate_getters() {
         // scope this to drop the chainstate ref and db tx
         let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate;
         let sort_db = peer.sortdb.as_mut().unwrap();
-        let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap();
+        let sort_handle = sort_db.index_handle(&sort_tip.sortition_id);
 
         // no tenures yet
-        assert!(
-            NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite())
-                .unwrap()
-                .is_none()
-        );
+        assert!(NakamotoChainState::get_ongoing_tenure(
+            &mut chainstate.index_conn(),
+            &sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap()
+        )
+        .unwrap()
+        .is_none());
 
         // sortition-existence-check works
         assert_eq!(
-            NakamotoChainState::check_sortition_exists(&mut sort_tx, &sort_tip.consensus_hash)
+            NakamotoChainState::check_sortition_exists(&sort_handle, &sort_tip.consensus_hash)
                 .unwrap(),
             sort_tip
         );
@@ -770,13 +1126,14 @@ fn test_nakamoto_chainstate_getters() {
         let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap();
 
         // we now have a tenure, and it confirms the last epoch2 block
-        let highest_tenure =
-            NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite())
-                .unwrap()
-                .unwrap();
+        let highest_tenure = NakamotoChainState::get_ongoing_tenure(
+            &mut chainstate.index_conn(),
+            &sort_tx.get_nakamoto_tip_block_id().unwrap().unwrap(),
+        )
+        .unwrap()
+        .unwrap();
         assert_eq!(highest_tenure.coinbase_height, 12);
         assert_eq!(highest_tenure.num_blocks_confirmed, 1);
-        assert_eq!(highest_tenure.tenure_index, 1);
         assert_eq!(highest_tenure.tenure_id_consensus_hash, consensus_hash);
         assert_eq!(highest_tenure.burn_view_consensus_hash, consensus_hash);
 
@@ -798,8 +1155,7 @@ fn test_nakamoto_chainstate_getters() {
         .unwrap()
         .is_some());
         assert!(NakamotoChainState::check_tenure_continuity(
-            chainstate.db(),
-            sort_tx.sqlite(),
+            &mut chainstate.index_conn(),
            &blocks[0].header.consensus_hash,
            &blocks[1].header,
        )
@@ -823,15 +1179,19 @@ fn test_nakamoto_chainstate_getters() {
         .unwrap()
         .is_some());
 
-        // this should fail, since it's not idempotent -- the highest tenure _is_ this tenure
-        assert!(NakamotoChainState::check_nakamoto_tenure(
-            chainstate.db(),
-            &mut sort_tx,
-            &blocks[0].header,
-            &tenure_change_payload,
-        )
-        .unwrap()
-        .is_none());
+        // this should return the previous tenure
+        assert_eq!(
+            NakamotoChainState::check_nakamoto_tenure(
+                &mut chainstate.index_conn(),
+                &mut sort_tx,
+                &blocks[0].header,
+                &tenure_change_payload,
+            )
+            .unwrap()
+            .unwrap()
+            .tenure_id_consensus_hash,
+            tenure_change_payload.prev_tenure_consensus_hash
+        );
 
         let cur_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sort_tx.sqlite()).unwrap();
         let (cur_stacks_ch, cur_stacks_bhh, cur_stacks_height) =
@@ -853,14 +1213,18 @@ fn test_nakamoto_chainstate_getters() {
     .unwrap();
 
     // check works (this would be the first tenure)
-    assert!(NakamotoChainState::check_nakamoto_tenure(
-        chainstate.db(),
-        &mut sort_tx,
-        &blocks[0].header,
-        &tenure_change_payload,
-    )
-    .unwrap()
-    .is_some());
+    assert_eq!(
+        NakamotoChainState::check_nakamoto_tenure(
+            &mut chainstate.index_conn(),
+            &mut sort_tx,
+            &blocks[0].header,
+            &tenure_change_payload,
+        )
+        .unwrap()
+        .unwrap()
+        .tenure_id_consensus_hash,
+        tenure_change_payload.prev_tenure_consensus_hash
+    );
 
     // restore sort_tx
@@ -875,7 +1239,6 @@ fn test_nakamoto_chainstate_getters() {
         chainstate.db(),
         &blocks[0].header,
         12,
-        1,
         &tenure_change_payload,
     )
     .unwrap();
@@ -908,16 +1271,6 @@ fn test_nakamoto_chainstate_getters() {
         .miner
         .make_nakamoto_coinbase(None, next_vrf_proof.clone());
 
-    // parent VRF proof check
-    let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof(
-        &peer.stacks_node.as_ref().unwrap().chainstate.db(),
-        peer.sortdb.as_ref().unwrap().conn(),
-        &next_consensus_hash,
-        &txid,
-    )
-    .unwrap();
-    assert_eq!(parent_vrf_proof, vrf_proof);
-
     // make the second tenure's blocks
     let blocks_and_sizes = peer.make_nakamoto_tenure(
         next_tenure_change_tx.clone(),
@@ -962,13 +1315,14 @@ fn test_nakamoto_chainstate_getters() {
         let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap();
 
         // we now have a new highest tenure
-        let highest_tenure =
-            NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite())
-                .unwrap()
-                .unwrap();
+        let highest_tenure = NakamotoChainState::get_ongoing_tenure(
+            &mut chainstate.index_conn(),
+            &sort_tx.get_nakamoto_tip_block_id().unwrap().unwrap(),
+        )
+        .unwrap()
+        .unwrap();
         assert_eq!(highest_tenure.coinbase_height, 13);
         assert_eq!(highest_tenure.num_blocks_confirmed, 10);
-        assert_eq!(highest_tenure.tenure_index, 2);
         assert_eq!(highest_tenure.tenure_id_consensus_hash, next_consensus_hash);
         assert_eq!(highest_tenure.prev_tenure_id_consensus_hash, consensus_hash);
         assert_eq!(highest_tenure.burn_view_consensus_hash, next_consensus_hash);
@@ -987,15 +1341,13 @@ fn test_nakamoto_chainstate_getters() {
         .unwrap()
         .is_none());
         assert!(NakamotoChainState::check_tenure_continuity(
-            chainstate.db(),
-            sort_tx.sqlite(),
+            &mut chainstate.index_conn(),
             &new_blocks[0].header.consensus_hash,
             &new_blocks[1].header,
         )
         .unwrap());
         assert!(!NakamotoChainState::check_tenure_continuity(
-            chainstate.db(),
-            sort_tx.sqlite(),
+            &mut chainstate.index_conn(),
             &blocks[0].header.consensus_hash,
             &new_blocks[1].header,
         )
@@ -1056,24 +1408,32 @@ fn test_nakamoto_chainstate_getters() {
         )
         .unwrap();
 
-        assert!(NakamotoChainState::check_nakamoto_tenure(
-            chainstate.db(),
-            &mut sort_tx,
-            &new_blocks[0].header,
-            &tenure_change_payload,
-        )
-        .unwrap()
-        .is_some());
+        assert_eq!(
+            NakamotoChainState::check_nakamoto_tenure(
+                &mut chainstate.index_conn(),
+                &mut sort_tx,
+                &new_blocks[0].header,
+                &tenure_change_payload,
+            )
+            .unwrap()
+            .unwrap()
+            .tenure_id_consensus_hash,
+            tenure_change_payload.prev_tenure_consensus_hash
+        );
 
-        // checks on older confired tenures continue to fail
-        assert!(NakamotoChainState::check_nakamoto_tenure(
-            chainstate.db(),
-            &mut sort_tx,
-            &blocks[0].header,
-            &old_tenure_change_payload,
-        )
-        .unwrap()
-        .is_none());
+        // checks on older confirmed tenures return the prev tenure
+        assert_eq!(
+            NakamotoChainState::check_nakamoto_tenure(
+                &mut chainstate.index_conn(),
+                &mut sort_tx,
+                &blocks[0].header,
+                &old_tenure_change_payload,
+            )
+            .unwrap()
+            .unwrap()
+            .tenure_id_consensus_hash,
+            old_tenure_change_payload.prev_tenure_consensus_hash
+        );
 
         // restore sort_tx
@@ -1088,7 +1448,6 @@ fn test_nakamoto_chainstate_getters() {
             chainstate.db(),
             &new_blocks[0].header,
             13,
-            2,
             &tenure_change_payload,
         )
         .unwrap();
@@ -1109,8 +1468,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a
     )
     .unwrap();
 
-    let mut test_signers = TestSigners::default();
-    let test_stackers = TestStacker::common_signing_set(&test_signers);
+    let (mut test_signers, test_stackers) = TestStacker::common_signing_set();
     let mut peer = boot_nakamoto(
         function_name!(),
         vec![(addr.into(), 100_000_000)],
@@ -1155,33 +1513,6 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a
         let num_blocks: usize = (thread_rng().gen::<usize>() % 10) + 1;
         let block_height = peer.get_burn_block_height();
 
-        // If we are in the prepare phase, check if we need to generate
-        // aggregate key votes
-        let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) {
-            let cycle_id = peer
-                .config
-                .burnchain
-                .block_height_to_reward_cycle(block_height)
-                .unwrap();
-            let next_cycle_id = cycle_id as u128 + 1;
-
-            with_sortdb(&mut peer, |chainstate, sortdb| {
-                if let Some(tip) = all_blocks.last() {
-                    make_all_signers_vote_for_aggregate_key(
-                        chainstate,
-                        sortdb,
-                        &tip.block_id(),
-                        &mut test_signers,
-                        &test_stackers,
-                        next_cycle_id,
-                    )
-                } else {
-                    vec![]
-                }
-            })
-        } else {
-            vec![]
-        };
 
         // do a stx transfer in each block to a given recipient
         let recipient_addr =
@@ -1191,13 +1522,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a
             coinbase_tx,
             &mut test_signers,
             |miner, chainstate, sortdb, blocks_so_far| {
-                // Include the aggregate key voting transactions in the first block.
-                let mut txs = if blocks_so_far.is_empty() {
-                    txs.clone()
-                } else {
-                    vec![]
-                };
-
+                let mut txs = vec![];
                 if blocks_so_far.len() < num_blocks {
                     debug!("\n\nProduce block {}\n\n", all_blocks.len());
 
@@ -1378,18 +1703,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a
             assert_eq!(matured_reward.parent_miner.coinbase, 1000_000_000);
         }
 
-        if i == 8 {
-            // epoch2
-            assert_eq!(
-                matured_reward.parent_miner.tx_fees,
-                MinerPaymentTxFees::Epoch2 {
-                    // The signers voting transaction is paying a fee of 1 uSTX
-                    // currently, but this may change to pay 0.
-                    anchored: 1,
-                    streamed: 0,
-                }
-            );
-        } else if i < 11 {
+        if i < 11 {
             // epoch2
             assert_eq!(
                 matured_reward.parent_miner.tx_fees,
@@ -1423,18 +1737,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a
         } else {
             assert_eq!(miner_reward.coinbase, 1000_000_000);
         }
-        if i == 7 {
-            // epoch2
-            assert_eq!(
-                miner_reward.tx_fees,
-                MinerPaymentTxFees::Epoch2 {
-                    // The signers voting transaction is paying a fee of 1 uSTX
-                    // currently, but this may change to pay 0.
- anchored: 1, - streamed: 0, - } - ); - } else if i < 10 { + if i < 10 { // epoch2 assert_eq!( miner_reward.tx_fees, @@ -1510,8 +1813,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1600,10 +1902,16 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() .unwrap() - .unwrap(); + .unwrap(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1614,7 +1922,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> assert!(tip.consensus_hash == sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 12); assert_eq!(highest_tenure.cause, TenureChangeCause::BlockFound); - assert_eq!(highest_tenure.tenure_index, 1); assert_eq!(highest_tenure.num_blocks_confirmed, 1); // extend first tenure @@ -1692,10 +1999,16 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() .unwrap() - .unwrap(); + .unwrap(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1706,7 +2019,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> assert!(tip.consensus_hash != sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 12); assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); - assert_eq!(highest_tenure.tenure_index, 2); assert_eq!(highest_tenure.num_blocks_confirmed, 10); // second tenure @@ -1787,10 +2099,16 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() .unwrap() - .unwrap(); + .unwrap(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1801,7 +2119,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> 
assert!(tip.consensus_hash == sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 13); assert_eq!(highest_tenure.cause, TenureChangeCause::BlockFound); - assert_eq!(highest_tenure.tenure_index, 3); assert_eq!(highest_tenure.num_blocks_confirmed, 20); // replay the blocks and sortitions in random order, and verify that we still reach the chain @@ -1848,8 +2165,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + + // make enough signers and signing keys so we can create a block and a malleablized block that + // are both valid + let (mut test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1883,33 +2204,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe debug!("Next burnchain block: {}", &consensus_hash); let block_height = peer.get_burn_block_height(); - // If we are in the prepare phase, check if we need to generate - // aggregate key votes - let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) { - let cycle_id = peer - .config - .burnchain - .block_height_to_reward_cycle(block_height) - .unwrap(); - let next_cycle_id = cycle_id as u128 + 1; - - with_sortdb(&mut peer, |chainstate, sortdb| { - if let Some(tip) = all_blocks.last() { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.block_id(), - &mut test_signers, - &test_stackers, - next_cycle_id, - ) - } else { - vec![] - } - }) - } else { - vec![] - }; // do a stx transfer in each block to a given recipient let recipient_addr = @@ -1920,12 +2214,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &mut test_signers, |miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 10 { - // Include the aggregate key voting transactions in the first block. 
-                    let mut txs = if blocks_so_far.is_empty() {
-                        txs.clone()
-                    } else {
-                        vec![]
-                    };
+                    let mut txs = vec![];
 
                     debug!("\n\nProduce block {}\n\n", blocks_so_far.len());
 
@@ -1989,10 +2278,16 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe
             let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate;
             let sort_db = peer.sortdb.as_mut().unwrap();
             let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
-            let tenure =
-                NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn())
+            let tenure = NakamotoChainState::get_ongoing_tenure(
+                &mut chainstate.index_conn(),
+                &sort_db
+                    .index_handle_at_tip()
+                    .get_nakamoto_tip_block_id()
                     .unwrap()
-                    .unwrap();
+                    .unwrap(),
+            )
+            .unwrap()
+            .unwrap();
             (tenure, tip)
         };
 
@@ -2008,7 +2303,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe
         assert!(last_block.header.consensus_hash == sort_tip.consensus_hash);
         assert_eq!(highest_tenure.coinbase_height, 12 + i);
         assert_eq!(highest_tenure.cause, TenureChangeCause::Extended);
-        assert_eq!(highest_tenure.tenure_index, 10 * (i + 1));
         assert_eq!(
             highest_tenure.num_blocks_confirmed,
             (blocks.len() as u32) - 1
@@ -2038,9 +2332,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe
         rc_blocks.push(all_blocks.clone());
         rc_burn_ops.push(all_burn_ops.clone());
 
-        all_burn_ops.clear();
-        all_blocks.clear();
-
         // in nakamoto, tx fees are rewarded by the next tenure, so the
         // scheduled rewards come 1 tenure after the coinbase reward matures
         let miner = p2pkh_from(&stx_miner_key);
@@ -2155,6 +2446,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe
     );
 
     peer.check_nakamoto_migration();
+    peer.check_malleablized_blocks(all_blocks, 2);
     return peer;
 }
 
@@ -2162,3 +2454,89 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe
 fn test_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() {
     simple_nakamoto_coordinator_10_extended_tenures_10_sortitions();
 }
+
+#[test]
+fn process_next_nakamoto_block_deadlock() {
+    let private_key = StacksPrivateKey::from_seed(&[2]);
+    let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key));
+
+    let num_stackers: u32 = 4;
+    let mut signing_key_seed = num_stackers.to_be_bytes().to_vec();
+    signing_key_seed.extend_from_slice(&[1, 1, 1, 1]);
+    let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice());
+    let test_stackers = (0..num_stackers)
+        .map(|index| TestStacker {
+            signer_private_key: signing_key.clone(),
+            stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()),
+            amount: u64::MAX as u128 - 10000,
+            pox_addr: Some(PoxAddress::Standard(
+                StacksAddress::new(
+                    C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
+                    Hash160::from_data(&index.to_be_bytes()),
+                ),
+                Some(AddressHashMode::SerializeP2PKH),
+            )),
+            max_amount: None,
+        })
+        .collect::<Vec<_>>();
+    let test_signers = TestSigners::new(vec![signing_key]);
+    let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants;
+    pox_constants.reward_cycle_length = 10;
+    pox_constants.v2_unlock_height = 21;
+    pox_constants.pox_3_activation_height = 26;
+    pox_constants.v3_unlock_height = 27;
+    pox_constants.pox_4_activation_height = 28;
+
+    let mut boot_plan = NakamotoBootPlan::new(function_name!())
+        .with_test_stackers(test_stackers.clone())
+        .with_test_signers(test_signers.clone())
+        .with_private_key(private_key);
+    boot_plan.pox_constants = pox_constants;
+
+    info!("Creating peer");
+
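+    // test plan: stall the miner mid block-processing, take the sortition DB
+    // transaction from this thread while it is stalled, lift the stall, and
+    // only then take the chainstate transaction; a lock-ordering regression
+    // would make the final miner_thread.join() below hang forever.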
+    let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None);
+    let mut sortition_db = peer.sortdb().reopen().unwrap();
+    let (chainstate, _) = &mut peer
+        .stacks_node
+        .as_mut()
+        .unwrap()
+        .chainstate
+        .reopen()
+        .unwrap();
+
+    enable_process_block_stall();
+
+    let miner_thread = std::thread::spawn(move || {
+        info!(" ------------------------------- MINING TENURE");
+        let (block, burn_height, ..) =
+            peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true);
+        info!(" ------------------------------- TENURE MINED");
+    });
+
+    // Wait a bit, to ensure the miner has reached the stall
+    std::thread::sleep(std::time::Duration::from_secs(10));
+
+    // Lock the sortdb
+    info!(" ------------------------------- TRYING TO LOCK THE SORTDB");
+    let sort_tx = sortition_db.tx_begin().unwrap();
+    info!(" ------------------------------- SORTDB LOCKED");
+
+    // Un-stall the block processing
+    disable_process_block_stall();
+
+    // Wait a bit, to ensure the tenure will have grabbed any locks it needs
+    std::thread::sleep(std::time::Duration::from_secs(10));
+
+    // Lock the chainstate db
+    info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE");
+    let chainstate_tx = chainstate.chainstate_tx_begin().unwrap();
+
+    info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED");
+    drop(chainstate_tx);
+    drop(sort_tx);
+    info!(" ------------------------------- MAIN THREAD FINISHED");
+
+    // Wait for the blocker and miner threads to finish
+    miner_thread.join().unwrap();
+}
diff --git a/stackslib/src/chainstate/nakamoto/keys.rs b/stackslib/src/chainstate/nakamoto/keys.rs
new file mode 100644
index 00000000000..2944c70affa
--- /dev/null
+++ b/stackslib/src/chainstate/nakamoto/keys.rs
@@ -0,0 +1,143 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId};
+use stacks_common::util::hash::{hex_bytes, to_hex};
+
+use crate::chainstate::nakamoto::tenure::NakamotoTenureEventId;
+
+/// MARF key for the ongoing tenure ID. Maps to a tenure event ID (the tenure's
+/// burn view consensus hash and block ID)
+pub fn ongoing_tenure_id() -> &'static str {
+    "nakamoto::tenures::ongoing_tenure_id"
+}
+
+/// MARF key to map the coinbase height of a tenure to its block ID
+pub fn ongoing_tenure_coinbase_height(coinbase_height: u64) -> String {
+    format!(
+        "nakamoto::tenures::ongoing_tenure_coinbase_height::{}",
+        coinbase_height
+    )
+}
+
+/// MARF key to map the consensus hash of a tenure to its block-found block ID
+pub fn block_found_tenure_id(tenure_id_consensus_hash: &ConsensusHash) -> String {
+    format!(
+        "nakamoto::tenures::block_found_tenure_id::{}",
+        tenure_id_consensus_hash
+    )
+}
+
+/// MARF key to map the consensus hash of a tenure to its highest block's ID
+pub fn highest_block_in_tenure(tenure_id_consensus_hash: &ConsensusHash) -> String {
+    format!(
+        "nakamoto::tenures::highest_block_in_tenure::{}",
+        tenure_id_consensus_hash
+    )
+}
+
+/// MARF key to map a tenure to its coinbase height
+pub fn coinbase_height(ch: &ConsensusHash) -> String {
+    format!("nakamoto::headers::coinbase_height::{}", ch)
+}
+
+/// MARF key to map a tenure to its start-block's ID
+pub fn tenure_start_block_id(ch: &ConsensusHash) -> String {
+    format!("nakamoto::headers::tenure_start_block_id::{}", ch)
+}
+
+/// MARF key to map a tenure to its final block's block ID
+pub fn finished_tenure_consensus_hash(ch: &ConsensusHash) -> String {
+    format!("nakamoto::tenures::finished_tenure_consensus_hash::{}", ch)
+}
+
+/// MARF key to map a tenure to its parent tenure
+pub fn parent_tenure_consensus_hash(ch: &ConsensusHash) -> String {
+    format!("nakamoto::tenures::parent_tenure_consensus_hash::{}", ch)
+}
+
+/// Canonical MARF value of a block ID
+pub fn make_block_id_value(id: &StacksBlockId) -> String {
+    format!("{}", id)
+}
+
+/// Canonical MARF value of a consensus hash
+pub fn make_consensus_hash_value(ch: &ConsensusHash) -> String {
+    format!("{}", ch)
+}
+
+/// Canonical MARF value of a u64
+pub fn make_u64_value(value: u64) -> String {
+    to_hex(&value.to_be_bytes())
+}
+
+/// Canonical MARF value of a bool
+pub fn make_bool_value(value: bool) -> String {
+    to_hex(&[if value { 1 } else { 0 }])
+}
+
+/// Canonical MARF value of a tenure event ID
+pub fn make_tenure_id_value(value: &NakamotoTenureEventId) -> String {
+    format!("{}{}", &value.burn_view_consensus_hash, &value.block_id)
+}
+
+/// Decode a MARF-stored consensus hash
+pub fn parse_consensus_hash(value: &str) -> Option<ConsensusHash> {
+    ConsensusHash::from_hex(value).ok()
+}
+
+/// Decode a MARF-stored block ID
+pub fn parse_block_id(value: &str) -> Option<StacksBlockId> {
+    StacksBlockId::from_hex(value).ok()
+}
+
+/// Decode a MARF-stored u64
+pub fn parse_u64(value: &str) -> Option<u64> {
+    let bytes = hex_bytes(value).ok()?;
+    if bytes.len() != 8 {
+        return None;
+    }
+    let mut bytes_u64 = [0u8; 8];
+    bytes_u64[0..8].copy_from_slice(&bytes[0..8]);
+    Some(u64::from_be_bytes(bytes_u64))
+}
+
+/// Decode a MARF-stored bool
+pub fn parse_bool(value: &str) -> Option<bool> {
+    let bytes = hex_bytes(value).ok()?;
+    if bytes.len() != 1 {
+        return None;
+    }
+    Some(bytes[0] != 0)
+}
+
+/// Decode a MARF-stored tenure event ID
+pub fn parse_tenure_id_value(value: &str) -> Option<NakamotoTenureEventId> {
+    let bytes = hex_bytes(value).ok()?;
+    if bytes.len() != 52 {
+        // ConsensusHash is 20 bytes
+        // StacksBlockId is 32 bytes
+        return None;
+    }
+    let mut ch_bytes = [0u8; 20];
+    let mut block_id_bytes = [0u8; 32];
+    ch_bytes[0..20].copy_from_slice(&bytes[0..20]);
+    block_id_bytes[0..32].copy_from_slice(&bytes[20..52]);
+
+    let id = NakamotoTenureEventId {
+        burn_view_consensus_hash: ConsensusHash(ch_bytes),
+        block_id: StacksBlockId(block_id_bytes),
+    };
+    Some(id)
+}
diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs
index 33ee2653690..66aa6cc1d98 100644
--- a/stackslib/src/chainstate/nakamoto/miner.rs
+++ b/stackslib/src/chainstate/nakamoto/miner.rs
@@ -41,16 +41,19 @@ use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum};
 use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey};
 
 use crate::burnchains::{PrivateKey, PublicKey};
-use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx};
+use crate::chainstate::burn::db::sortdb::{
+    SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx,
+};
 use crate::chainstate::burn::operations::*;
 use crate::chainstate::burn::*;
+use crate::chainstate::coordinator::OnChainRewardSetProvider;
 use crate::chainstate::nakamoto::{
     MaturedMinerRewards, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SetupBlockResult,
 };
 use crate::chainstate::stacks::address::StacksAddressExtensions;
 use crate::chainstate::stacks::boot::MINERS_NAME;
 use crate::chainstate::stacks::db::accounts::MinerReward;
-use crate::chainstate::stacks::db::blocks::MemPoolRejection;
+use crate::chainstate::stacks::db::blocks::{DummyEventDispatcher, MemPoolRejection};
 use crate::chainstate::stacks::db::transactions::{
     handle_clarity_runtime_error, ClarityRuntimeTxError,
 };
@@ -79,6 +82,7 @@ use crate::util_lib::boot::boot_code_id;
 use crate::util_lib::db::Error as DBError;
 
 /// Nakamoto tenure information
+#[derive(Debug)]
 pub struct NakamotoTenureInfo {
     /// Coinbase tx, if this is a new tenure
     pub coinbase_tx: Option<StacksTransaction>,
@@ -119,7 +123,7 @@ pub struct NakamotoBlockBuilder {
     /// transactions selected
     txs: Vec<StacksTransaction>,
     /// header we're filling in
-    header: NakamotoBlockHeader,
+    pub header: NakamotoBlockHeader,
 }
 
 pub struct MinerTenureInfo<'a> {
@@ -136,6 +140,8 @@ pub struct MinerTenureInfo<'a> {
     pub parent_burn_block_height: u32,
     pub coinbase_height: u64,
     pub cause: Option<TenureChangeCause>,
+    pub active_reward_set: boot::RewardSet,
+    pub tenure_block_commit: LeaderBlockCommitOp,
 }
 
 impl NakamotoBlockBuilder {
@@ -176,6 +182,7 @@ impl NakamotoBlockBuilder {
         total_burn: u64,
         tenure_change: Option<&StacksTransaction>,
         coinbase: Option<&StacksTransaction>,
+        bitvec_len: u16,
     ) -> Result<NakamotoBlockBuilder, Error> {
         let next_height = parent_stacks_header
             .anchored_header
@@ -208,6 +215,12 @@ impl NakamotoBlockBuilder {
                 total_burn,
                 tenure_id_consensus_hash.clone(),
                 parent_stacks_header.index_block_hash(),
+                bitvec_len,
+                parent_stacks_header
+                    .anchored_header
+                    .as_stacks_nakamoto()
+                    .map(|b| b.timestamp)
+                    .unwrap_or(0),
             ),
         })
     }
@@ -219,11 +232,72 @@ impl NakamotoBlockBuilder {
     pub fn load_tenure_info<'a>(
         &self,
         chainstate: &'a mut StacksChainState,
-        burn_dbconn: &'a SortitionDBConn,
+        burn_dbconn: &'a SortitionHandleConn,
         cause: Option<TenureChangeCause>,
     ) -> Result<MinerTenureInfo<'a>, Error> {
         debug!("Nakamoto miner tenure begin");
+
+        let Some(tenure_election_sn) =
+            SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)?
+        else {
+            warn!("Could not find sortition snapshot for burn block that elected the miner";
+                  "consensus_hash" => %self.header.consensus_hash,
+                  "stacks_block_hash" => %self.header.block_hash(),
+                  "stacks_block_id" => %self.header.block_id()
+            );
+            return Err(Error::NoSuchBlockError);
+        };
+        let Some(tenure_block_commit) = SortitionDB::get_block_commit(
+            &burn_dbconn,
+            &tenure_election_sn.winning_block_txid,
+            &tenure_election_sn.sortition_id,
+        )?
+        else {
+            warn!("Could not find winning block commit for burn block that elected the miner";
+                  "consensus_hash" => %self.header.consensus_hash,
+                  "stacks_block_hash" => %self.header.block_hash(),
+                  "stacks_block_id" => %self.header.block_id(),
+                  "winning_txid" => %tenure_election_sn.winning_block_txid
+            );
+            return Err(Error::NoSuchBlockError);
+        };
+
+        let elected_height = tenure_election_sn.block_height;
+        let elected_in_cycle = burn_dbconn
+            .context
+            .pox_constants
+            .block_height_to_reward_cycle(burn_dbconn.context.first_block_height, elected_height)
+            .ok_or_else(|| {
+                Error::InvalidStacksBlock(
+                    "Elected in block height before first_block_height".into(),
+                )
+            })?;
+        let rs_provider = OnChainRewardSetProvider::<DummyEventDispatcher>(None);
+        let coinbase_height_of_calc = rs_provider.get_height_of_pox_calculation(
+            elected_in_cycle,
+            chainstate,
+            burn_dbconn,
+            &self.header.parent_block_id,
+        ).map_err(|e| {
+            warn!(
+                "Cannot process Nakamoto block: could not load reward set that elected the block";
+                "err" => ?e,
+            );
+            Error::NoSuchBlockError
+        })?;
+        let active_reward_set = rs_provider.read_reward_set_at_calculated_block(
+            coinbase_height_of_calc,
+            chainstate,
+            &self.header.parent_block_id,
+            true,
+        ).map_err(|e| {
+            warn!(
+                "Cannot process Nakamoto block: could not load reward set that elected the block";
+                "err" => ?e,
+            );
+            Error::NoSuchBlockError
+        })?;
+
         // must build off of the header's consensus hash as the burnchain view, not the canonical_tip_bhh:
         let burn_sn = SortitionDB::get_block_snapshot_consensus(burn_dbconn.conn(), &self.header.consensus_hash)?
             .ok_or_else(|| {
@@ -256,7 +330,7 @@ impl NakamotoBlockBuilder {
         let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash);
 
         let parent_coinbase_height =
-            NakamotoChainState::get_coinbase_height(chainstate.db(), &parent_block_id)
+            NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &parent_block_id)
                 .ok()
                 .flatten()
                 .unwrap_or(0);
@@ -285,6 +359,8 @@ impl NakamotoBlockBuilder {
             parent_burn_block_height: chain_tip.burn_header_height,
             cause,
             coinbase_height,
+            active_reward_set,
+            tenure_block_commit,
         })
     }
 
@@ -295,7 +371,7 @@ impl NakamotoBlockBuilder {
     /// yet known).
     pub fn tenure_begin<'a, 'b>(
         &mut self,
-        burn_dbconn: &'a SortitionDBConn,
+        burn_dbconn: &'a SortitionHandleConn,
         info: &'b mut MinerTenureInfo<'a>,
     ) -> Result<ClarityTx<'b, 'a>, Error> {
         let SetupBlockResult {
@@ -317,6 +393,9 @@ impl NakamotoBlockBuilder {
             info.cause == Some(TenureChangeCause::BlockFound),
             info.coinbase_height,
             info.cause == Some(TenureChangeCause::Extended),
+            &self.header.pox_treatment,
+            &info.tenure_block_commit,
+            &info.active_reward_set,
         )?;
         self.matured_miner_rewards_opt = matured_miner_rewards_opt;
         Ok(clarity_tx)
@@ -394,7 +473,7 @@ impl NakamotoBlockBuilder {
     pub fn build_nakamoto_block(
         // not directly used; used as a handle to open other chainstates
         chainstate_handle: &StacksChainState,
-        burn_dbconn: &SortitionDBConn,
+        burn_dbconn: &SortitionHandleConn,
         mempool: &mut MemPoolDB,
         // Stacks header we're building off of.
         parent_stacks_header: &StacksHeaderInfo,
@@ -406,6 +485,7 @@ impl NakamotoBlockBuilder {
         settings: BlockBuilderSettings,
         event_observer: Option<&dyn MemPoolEventDispatcher>,
         signer_transactions: Vec<StacksTransaction>,
+        signer_bitvec_len: u16,
     ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec<TransactionEvent>), Error> {
         let (tip_consensus_hash, tip_block_hash, tip_height) = (
             parent_stacks_header.consensus_hash.clone(),
@@ -426,6 +506,7 @@ impl NakamotoBlockBuilder {
             total_burn,
             tenure_info.tenure_change_tx(),
             tenure_info.coinbase_tx(),
+            signer_bitvec_len,
         )?;
 
         let ts_start = get_epoch_time_ms();
@@ -490,8 +571,8 @@ impl NakamotoBlockBuilder {
 
         info!(
             "Miner: mined Nakamoto block";
-            "block_hash" => %block.header.block_hash(),
-            "block_id" => %block.header.block_id(),
+            "stacks_block_hash" => %block.header.block_hash(),
+            "stacks_block_id" => %block.header.block_id(),
             "height" => block.header.chain_length,
             "tx_count" => block.txs.len(),
             "parent_block_id" => %block.header.parent_block_id,
@@ -499,6 +580,7 @@ impl NakamotoBlockBuilder {
             "execution_consumed" => %consumed,
             "%-full" => block_limit.proportion_largest_dimension(&consumed),
             "assembly_time_ms" => ts_end.saturating_sub(ts_start),
+            "consensus_hash" => %block.header.consensus_hash
         );
 
         Ok((block, consumed, size, tx_events))
@@ -507,43 +589,6 @@ impl NakamotoBlockBuilder {
     pub fn get_bytes_so_far(&self) -> u64 {
         self.bytes_so_far
     }
-
-    /// Make a StackerDB chunk message containing a proposed block.
-    /// Sign it with the miner's private key.
-    /// Automatically determine which StackerDB slot and version number to use.
-    /// Returns Some(chunk) if the given key corresponds to one of the expected miner slots
-    /// Returns None if not
-    /// Returns an error on signing or DB error
-    pub fn make_stackerdb_block_proposal<T: StacksMessageCodec>(
-        sortdb: &SortitionDB,
-        tip: &BlockSnapshot,
-        stackerdbs: &StackerDBs,
-        block: &T,
-        miner_privkey: &StacksPrivateKey,
-        miners_contract_id: &QualifiedContractIdentifier,
-    ) -> Result<Option<StackerDBChunkData>, Error> {
-        let miner_pubkey = StacksPublicKey::from_private(&miner_privkey);
-        let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey)?
-        else {
-            // No slot exists for this miner
-            return Ok(None);
-        };
-        // proposal slot is the first slot.
-        let slot_id = slot_range.start;
-        // Get the LAST slot version number written to the DB. If not found, use 0.
-        // Add 1 to get the NEXT version number
-        // Note: we already check above for the slot's existence
-        let slot_version = stackerdbs
-            .get_slot_version(&miners_contract_id, slot_id)?
- .unwrap_or(0) - .saturating_add(1); - let block_bytes = block.serialize_to_vec(); - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, block_bytes); - chunk - .sign(miner_privkey) - .map_err(|_| net_error::SigningError("Failed to sign StackerDB chunk".into()))?; - Ok(Some(chunk)) - } } impl BlockBuilder for NakamotoBlockBuilder { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 724f3681cd4..d059a96cb63 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -19,6 +19,8 @@ use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; +use clarity::types::PublicKey; +use clarity::util::secp256k1::{secp256k1_recover, Secp256k1PublicKey}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{BurnStateDB, ClarityDatabase}; @@ -27,8 +29,8 @@ use clarity::vm::types::{PrincipalData, StacksAddressExtensions, TupleData}; use clarity::vm::{ClarityVersion, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -39,11 +41,14 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{ + hex_bytes, to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum, +}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; @@ -57,13 +62,14 @@ use super::burn::db::sortdb::{ }; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp}; use super::stacks::boot::{ - PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, - BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, SIGNERS_NAME, SIGNERS_PK_LEN, + NakamotoSignerEntry, PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, + SIGNERS_NAME, SIGNERS_PK_LEN, }; use super::stacks::db::accounts::MinerReward; use super::stacks::db::{ ChainstateTx, ClarityTx, MinerPaymentSchedule, MinerPaymentTxFees, MinerRewardInfo, - StacksBlockHeaderTypes, StacksDBTx, StacksEpochReceipt, StacksHeaderInfo, + StacksBlockHeaderTypes, StacksEpochReceipt, StacksHeaderInfo, }; use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ @@ -74,12 +80,21 @@ use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use 
crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp};
 use crate::chainstate::burn::{BlockSnapshot, SortitionHash};
-use crate::chainstate::coordinator::{BlockEventDispatcher, Error};
+use crate::chainstate::coordinator::{BlockEventDispatcher, Error, OnChainRewardSetProvider};
+use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set;
+use crate::chainstate::nakamoto::keys as nakamoto_keys;
 use crate::chainstate::nakamoto::signer_set::NakamotoSigners;
-use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA;
+use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod;
+use crate::chainstate::nakamoto::tenure::{
+    NakamotoTenureEventId, NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2,
+    NAKAMOTO_TENURES_SCHEMA_3,
+};
 use crate::chainstate::stacks::address::PoxAddress;
 use crate::chainstate::stacks::boot::{POX_4_NAME, SIGNERS_UPDATE_STATE};
-use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState};
+use crate::chainstate::stacks::db::blocks::DummyEventDispatcher;
+use crate::chainstate::stacks::db::{
+    DBConfig as ChainstateConfig, StacksChainState, StacksDBConn, StacksDBTx,
+};
 use crate::chainstate::stacks::index::marf::MarfConnection;
 use crate::chainstate::stacks::{
     TenureChangeCause, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH,
@@ -89,7 +104,7 @@ use crate::clarity_vm::clarity::{
     ClarityInstance, ClarityTransactionConnection, Error as ClarityError, PreCommitClarityBlock,
 };
 use crate::clarity_vm::database::SortitionDBRef;
-use crate::core::BOOT_BLOCK_HASH;
+use crate::core::{BOOT_BLOCK_HASH, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD};
 use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT};
 use crate::net::Error as net_error;
 use crate::util_lib::boot;
@@ -101,6 +116,7 @@ use crate::util_lib::db::{
 use crate::{chainstate, monitoring};
 
 pub mod coordinator;
+pub mod keys;
 pub mod miner;
 pub mod signer_set;
 pub mod staging_blocks;
@@ -121,13 +137,13 @@ define_named_enum!(HeaderTypeNames {
 });
 
 impl ToSql for HeaderTypeNames {
-    fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
+    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
         self.get_name_str().to_sql()
     }
 }
 
 impl FromSql for HeaderTypeNames {
-    fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
+    fn column_result(value: rusqlite::types::ValueRef<'_>) -> FromSqlResult<Self> {
         Self::lookup_by_name(value.as_str()?).ok_or_else(|| FromSqlError::InvalidType)
     }
 }
@@ -144,7 +160,7 @@ lazy_static! {
         reward_set TEXT NOT NULL,
         PRIMARY KEY (index_block_hash)
     );"#.into(),
-    NAKAMOTO_TENURES_SCHEMA.into(),
+    NAKAMOTO_TENURES_SCHEMA_1.into(),
     r#"
       -- Table for Nakamoto block headers
       CREATE TABLE nakamoto_block_headers (
                      block_height INTEGER NOT NULL,
                      -- root hash of the internal, not-consensus-critical MARF that allows us to track chainstate/fork metadata
                      index_root TEXT NOT NULL,
-                     -- burn header hash corresponding to the consensus hash (NOT guaranteed to be unique, since we can 
+                     -- burn header hash corresponding to the consensus hash (NOT guaranteed to be unique, since we can
                      -- have 2+ blocks per burn block if there's a PoX fork)
                      burn_header_hash TEXT NOT NULL,
                      -- height of the burnchain block header that generated this consensus hash
@@ -178,7 +194,7 @@ lazy_static! {
                     state_index_root TEXT NOT NULL,
                     -- miner's signature over the block
                     miner_signature TEXT NOT NULL,
-                     -- signers' signature over the block
+                     -- signers' signatures over the block
                     signer_signature TEXT NOT NULL,
                     -- bitvec capturing stacker participation in signature
                     signer_bitvec TEXT NOT NULL,
@@ -188,7 +204,7 @@ lazy_static! {
                     header_type TEXT NOT NULL,
                     -- hash of the block
                     block_hash TEXT NOT NULL,
-                     -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it, 
+                     -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it,
                     -- and is guaranteed to be globally unique (across all Stacks forks and across all PoX forks).
                     -- index_block_hash is the block hash fed into the MARF index.
                     index_block_hash TEXT NOT NULL,
@@ -207,17 +223,245 @@ lazy_static! {
     );
     CREATE INDEX nakamoto_block_headers_by_consensus_hash ON nakamoto_block_headers(consensus_hash);
     "#.into(),
-    format!(
-        r#"ALTER TABLE payments
-            ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}";
-        "#,
-        HeaderTypeNames::Epoch2.get_name_str()),
-    r#"
-    UPDATE db_config SET version = "4";
-    "#.into(),
+    format!(
+        r#"ALTER TABLE payments
+            ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}";
+        "#,
+        HeaderTypeNames::Epoch2.get_name_str()),
+    r#"
+    UPDATE db_config SET version = "4";
+    "#.into(),
+    ];
+
+    pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec<String> = vec![
+        NAKAMOTO_TENURES_SCHEMA_2.into(),
+        r#"
+        ALTER TABLE nakamoto_block_headers
+        ADD COLUMN timestamp INTEGER NOT NULL;
+        "#.into(),
+        r#"
+        UPDATE db_config SET version = "5";
+        "#.into(),
+        // make burn_view NULLable. We could use a default value, but NULL should be safer (because it will error).
+        // there should be no entries in nakamoto_block_headers with a NULL entry when this column is added, because
+        // nakamoto blocks have not been produced yet.
+        r#"
+        ALTER TABLE nakamoto_block_headers
+        ADD COLUMN burn_view TEXT;
+        "#.into(),
+    ];
+
+    pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_3: Vec<String> = vec![
+        NAKAMOTO_TENURES_SCHEMA_3.into(),
+        r#"
+        UPDATE db_config SET version = "6";
+        "#.into(),
+        // Add a `height_in_tenure` field to the block header row, so we know how high this block is
+        // within its tenure. This is needed to process malleablized Nakamoto blocks with the same
+        // height, as well as accidental forks that can arise from slow miners.
+        //
+        //
+        //
+        // No default value is needed because at the time of this writing, this table is actually empty.
+        r#"
+        ALTER TABLE nakamoto_block_headers
+        ADD COLUMN height_in_tenure;
+        "#.into(),
    ];
 }
 
+#[cfg(test)]
+mod test_stall {
+    pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> =
+        std::sync::Mutex::new(None);
+
+    pub fn stall_block_processing() {
+        if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) {
+            // Do an extra check just so we don't log EVERY time.
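+            // (the flag is re-polled in the loop below, so the stall lifts as
+            // soon as disable_process_block_stall() flips it back to false)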
+ warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); + } + } + + pub fn enable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + } + + pub fn disable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + } +} + +/// Trait for common MARF getters between StacksDBConn and StacksDBTx +pub trait StacksDBIndexed { + fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; + fn sqlite(&self) -> &Connection; + + /// Get the block ID for a specific coinbase height in the fork identified by `tip` + fn get_nakamoto_block_id_at_coinbase_height( + &mut self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + Ok(self + .get( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + /// Get the first block in the tenure for a given tenure ID consensus hash in the fork + /// identified by `tip` + fn get_tenure_start_block_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + /// Get the coinbase height of a tenure (identified by its consensus hash) in a fork identified + /// by `tip` + fn get_coinbase_height( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get( + tip, + &nakamoto_keys::coinbase_height(tenure_id_consensus_hash), + )? + .map(|height_str| nakamoto_keys::parse_u64(&height_str)) + .flatten()) + } + + /// Get the ongoing tenure ID in the fork identified by `tip` + fn get_ongoing_tenure_id( + &mut self, + tip: &StacksBlockId, + ) -> Result, DBError> { + Ok(self + .get(tip, nakamoto_keys::ongoing_tenure_id())? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + /// Get the highest block ID in a tenure identified by its consensus hash in the Stacks fork + /// identified by `tip` + fn get_highest_block_id_in_tenure( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get( + tip, + &nakamoto_keys::highest_block_in_tenure(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + /// Get the block-found tenure ID for a given tenure's consensus hash (if defined) in a given + /// Stacks fork identified by `tip` + fn get_block_found_tenure_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get( + tip, + &nakamoto_keys::block_found_tenure_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + /// Determine if a tenure, identified by its consensus hash, has finished in a fork identified + /// by `tip` + fn is_tenure_finished( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + if self + .get_tenure_start_block_id(tip, tenure_id_consensus_hash)? 
+            .is_none()
+        {
+            // tenure not started
+            return Ok(None);
+        }
+        if self
+            .get(
+                tip,
+                &nakamoto_keys::finished_tenure_consensus_hash(tenure_id_consensus_hash),
+            )?
+            .is_none()
+        {
+            // tenure has started, but is not done yet
+            return Ok(Some(false));
+        }
+
+        // tenure started and finished
+        Ok(Some(true))
+    }
+
+    /// Get the parent tenure consensus hash of a given tenure (identified by its own consensus
+    /// hash) within a Stacks fork identified by `tip`
+    fn get_parent_tenure_consensus_hash(
+        &mut self,
+        tip: &StacksBlockId,
+        tenure_id_consensus_hash: &ConsensusHash,
+    ) -> Result<Option<ConsensusHash>, DBError> {
+        Ok(self
+            .get(
+                tip,
+                &nakamoto_keys::parent_tenure_consensus_hash(tenure_id_consensus_hash),
+            )?
+            .map(|ch_str| nakamoto_keys::parse_consensus_hash(&ch_str))
+            .flatten())
+    }
+}
+
+impl StacksDBIndexed for StacksDBConn<'_> {
+    fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result<Option<String>, DBError> {
+        self.get_indexed(tip, key)
+    }
+
+    fn sqlite(&self) -> &Connection {
+        self.conn()
+    }
+}
+
+impl StacksDBIndexed for StacksDBTx<'_> {
+    fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result<Option<String>, DBError> {
+        self.get_indexed(tip, key)
+    }
+
+    fn sqlite(&self) -> &Connection {
+        self.tx().deref()
+    }
+}
+
+impl<'a> ChainstateTx<'a> {
+    pub fn as_tx(&mut self) -> &mut StacksDBTx<'a> {
+        &mut self.tx
+    }
+}
+
 /// Matured miner reward schedules
 #[derive(Debug, Clone)]
 pub struct MaturedMinerPaymentSchedules {
@@ -236,6 +480,38 @@ impl MaturedMinerPaymentSchedules {
     }
 }
 
+/// Struct containing information about the miners assigned in the
+/// .miners stackerdb config
+pub struct MinersDBInformation {
+    signer_0_sortition: ConsensusHash,
+    signer_1_sortition: ConsensusHash,
+    latest_winner: u16,
+}
+
+impl MinersDBInformation {
+    /// What index in the `.miners` stackerdb is the miner who won
+    /// `sortition`?
+    pub fn get_signer_index(&self, sortition: &ConsensusHash) -> Option<u16> {
+        if sortition == &self.signer_0_sortition {
+            Some(0)
+        } else if sortition == &self.signer_1_sortition {
+            Some(1)
+        } else {
+            None
+        }
+    }
+
+    /// Get all of the sortitions whose winners are included in .miners
+    pub fn get_sortitions(&self) -> [&ConsensusHash; 2] {
+        [&self.signer_0_sortition, &self.signer_1_sortition]
+    }
+
+    /// Get the index of the latest sortition winner in .miners
+    pub fn get_latest_winner_index(&self) -> u16 {
+        self.latest_winner
+    }
+}
+
 /// Calculated matured miner rewards, from scheduled rewards
 #[derive(Debug, Clone)]
 pub struct MaturedMinerRewards {
@@ -303,13 +579,22 @@ pub struct NakamotoBlockHeader {
     pub tx_merkle_root: Sha512Trunc256Sum,
     /// The MARF trie root hash after this block has been processed
     pub state_index_root: TrieHash,
+    /// A Unix time timestamp of when this block was mined, according to the miner.
+    /// For the signers to consider a block valid, this timestamp must be:
+    ///  * Greater than the timestamp of its parent block
+    ///  * Less than 15 seconds into the future
+    pub timestamp: u64,
     /// Recoverable ECDSA signature from the tenure's miner.
     pub miner_signature: MessageSignature,
-    /// Schnorr signature over the block header from the signer set active during the tenure.
-    pub signer_signature: ThresholdSignature,
-    /// A bitvec which represents the signers that participated in this block signature.
+    /// The set of recoverable ECDSA signatures over
+    /// the block header from the signer set active during the tenure.
+    /// (ordered by reward set order)
+    pub signer_signature: Vec<MessageSignature>,
+    /// A bitvec which conveys whether reward addresses should be punished (by burning their PoX rewards)
+    /// or not in this block.
+    ///
     /// The maximum number of entries in the bitvec is 4000.
-    pub signer_bitvec: BitVec<4000>,
+    pub pox_treatment: BitVec<4000>,
 }
 
 impl FromRow<NakamotoBlockHeader> for NakamotoBlockHeader {
@@ -325,9 +610,13 @@ impl FromRow<NakamotoBlockHeader> for NakamotoBlockHeader {
         let parent_block_id = row.get("parent_block_id")?;
         let tx_merkle_root = row.get("tx_merkle_root")?;
         let state_index_root = row.get("state_index_root")?;
-        let signer_signature = row.get("signer_signature")?;
+        let timestamp_i64: i64 = row.get("timestamp")?;
+        let timestamp = timestamp_i64.try_into().map_err(|_| DBError::ParseError)?;
         let miner_signature = row.get("miner_signature")?;
         let signer_bitvec = row.get("signer_bitvec")?;
+        let signer_signature_json: String = row.get("signer_signature")?;
+        let signer_signature: Vec<MessageSignature> =
+            serde_json::from_str(&signer_signature_json).map_err(|_e| DBError::ParseError)?;
 
         Ok(NakamotoBlockHeader {
             version,
@@ -337,9 +626,10 @@ impl FromRow<NakamotoBlockHeader> for NakamotoBlockHeader {
             parent_block_id,
             tx_merkle_root,
             state_index_root,
+            timestamp,
             signer_signature,
             miner_signature,
-            signer_bitvec,
+            pox_treatment: signer_bitvec,
         })
     }
 }
@@ -388,9 +678,10 @@ impl StacksMessageCodec for NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
         write_next(fd, &self.miner_signature)?;
         write_next(fd, &self.signer_signature)?;
-        write_next(fd, &self.signer_bitvec)?;
+        write_next(fd, &self.pox_treatment)?;
 
         Ok(())
     }
@@ -404,9 +695,10 @@ impl StacksMessageCodec for NakamotoBlockHeader {
             parent_block_id: read_next(fd)?,
             tx_merkle_root: read_next(fd)?,
             state_index_root: read_next(fd)?,
+            timestamp: read_next(fd)?,
             miner_signature: read_next(fd)?,
             signer_signature: read_next(fd)?,
-            signer_bitvec: read_next(fd)?,
+            pox_treatment: read_next(fd)?,
         })
     }
 }
@@ -438,6 +730,7 @@ impl NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
 
         Ok(Sha512Trunc256Sum::from_hasher(hasher))
     }
@@ -453,8 +746,9 @@ impl NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
         write_next(fd, &self.miner_signature)?;
-        write_next(fd, &self.signer_bitvec)?;
+        write_next(fd, &self.pox_treatment)?;
 
         Ok(Sha512Trunc256Sum::from_hasher(hasher))
     }
@@ -467,8 +761,12 @@ impl NakamotoBlockHeader {
     }
 
     pub fn block_hash(&self) -> BlockHeaderHash {
-        BlockHeaderHash::from_serializer(self)
-            .expect("BUG: failed to serialize block header hash struct")
+        // same as sighash -- we don't commit to signatures
+        BlockHeaderHash(
+            self.signer_signature_hash_inner()
+                .expect("BUG: failed to serialize block header hash struct")
+                .0,
+        )
     }
 
     pub fn block_id(&self) -> StacksBlockId {
@@ -489,20 +787,115 @@ impl NakamotoBlockHeader {
         Ok(())
     }
 
-    /// Verify the block header against an aggregate public key
-    pub fn verify_signer(&self, signer_aggregate: &Point) -> bool {
-        let schnorr_signature = &self.signer_signature.0;
-        let message = self.signer_signature_hash().0;
-        schnorr_signature.verify(signer_aggregate, &message)
+    /// Verify the block header against the list of signer signatures
+    ///
+    /// Validate against:
+    /// - Any invalid signatures (eg not recoverable or not from a signer)
@@ -388,9 +678,10 @@ impl StacksMessageCodec for NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
         write_next(fd, &self.miner_signature)?;
         write_next(fd, &self.signer_signature)?;
-        write_next(fd, &self.signer_bitvec)?;
+        write_next(fd, &self.pox_treatment)?;
 
         Ok(())
     }
@@ -404,9 +695,10 @@ impl StacksMessageCodec for NakamotoBlockHeader {
             parent_block_id: read_next(fd)?,
             tx_merkle_root: read_next(fd)?,
             state_index_root: read_next(fd)?,
+            timestamp: read_next(fd)?,
             miner_signature: read_next(fd)?,
             signer_signature: read_next(fd)?,
-            signer_bitvec: read_next(fd)?,
+            pox_treatment: read_next(fd)?,
         })
     }
 }
@@ -438,6 +730,7 @@ impl NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
 
         Ok(Sha512Trunc256Sum::from_hasher(hasher))
     }
@@ -453,8 +746,9 @@ impl NakamotoBlockHeader {
         write_next(fd, &self.parent_block_id)?;
         write_next(fd, &self.tx_merkle_root)?;
         write_next(fd, &self.state_index_root)?;
+        write_next(fd, &self.timestamp)?;
         write_next(fd, &self.miner_signature)?;
-        write_next(fd, &self.signer_bitvec)?;
+        write_next(fd, &self.pox_treatment)?;
 
         Ok(Sha512Trunc256Sum::from_hasher(hasher))
     }
@@ -467,8 +761,12 @@ impl NakamotoBlockHeader {
     }
 
     pub fn block_hash(&self) -> BlockHeaderHash {
-        BlockHeaderHash::from_serializer(self)
-            .expect("BUG: failed to serialize block header hash struct")
+        // same as sighash -- we don't commit to signatures
+        BlockHeaderHash(
+            self.signer_signature_hash_inner()
+                .expect("BUG: failed to serialize block header hash struct")
+                .0,
+        )
     }
 
     pub fn block_id(&self) -> StacksBlockId {
@@ -489,20 +787,115 @@ impl NakamotoBlockHeader {
         Ok(())
     }
 
-    /// Verify the block header against an aggregate public key
-    pub fn verify_signer(&self, signer_aggregate: &Point) -> bool {
-        let schnorr_signature = &self.signer_signature.0;
-        let message = self.signer_signature_hash().0;
-        schnorr_signature.verify(signer_aggregate, &message)
+    /// Verify the block header against the list of signer signatures
+    ///
+    /// Validate against:
+    /// - Any invalid signatures (e.g. not recoverable or not from a signer)
+    /// - Any duplicate signatures
+    /// - At least the minimum number of signatures (based on total signer weight
+    ///   and a 70% threshold)
+    /// - Order of signatures is maintained vs signer set
+    ///
+    /// Returns the signing weight on success.
+    /// Returns ChainstateError::InvalidStacksBlock on error
+    #[cfg_attr(test, mutants::skip)]
+    pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result<u32, ChainstateError> {
+        let message = self.signer_signature_hash();
+        let Some(signers) = &reward_set.signers else {
+            return Err(ChainstateError::InvalidStacksBlock(
+                "No signers in the reward set".to_string(),
+            ));
+        };
+
+        let mut total_weight_signed: u32 = 0;
+        // `last_index` is used to prevent out-of-order signatures
+        let mut last_index = None;
+
+        let total_weight = reward_set
+            .total_signing_weight()
+            .map_err(|_| ChainstateError::NoRegisteredSigners(0))?;
+
+        // HashMap of <public key, (signer, index)>
+        let signers_by_pk: HashMap<_, _> = signers
+            .iter()
+            .enumerate()
+            .map(|(i, signer)| (&signer.signing_key, (signer, i)))
+            .collect();
+
+        for signature in self.signer_signature.iter() {
+            let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature)
+                .map_err(|_| {
+                    ChainstateError::InvalidStacksBlock(format!(
+                        "Unable to recover public key from signature {}",
+                        signature.to_hex()
+                    ))
+                })?;
+
+            let mut public_key_bytes = [0u8; 33];
+            public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]);
+
+            let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| {
+                ChainstateError::InvalidStacksBlock(format!(
+                    "Public key {} not found in the reward set",
+                    public_key.to_hex()
+                ))
+            })?;
+
+            // Enforce order of signatures
+            if let Some(index) = last_index.as_ref() {
+                if *index >= *signer_index {
+                    return Err(ChainstateError::InvalidStacksBlock(
+                        "Signatures are out of order".to_string(),
+                    ));
+                }
+            }
+            last_index = Some(*signer_index);
+
+            total_weight_signed = total_weight_signed
+                .checked_add(signer.weight)
+                .expect("FATAL: overflow while computing signer set threshold");
+        }
+
+        let threshold = Self::compute_voting_weight_threshold(total_weight)?;
+
+        if total_weight_signed < threshold {
+            return Err(ChainstateError::InvalidStacksBlock(format!(
+                "Not enough signatures. Needed at least {} but got {} (out of {})",
+                threshold, total_weight_signed, total_weight,
+            )));
+        }
+
+        return Ok(total_weight_signed);
+    }
+
+    /// Compute the threshold for the minimum number of signers (by weight) required
+    /// to approve a Nakamoto block.
+    pub fn compute_voting_weight_threshold(total_weight: u32) -> Result<u32, ChainstateError> {
+        let threshold = NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD;
+        let total_weight = u64::from(total_weight);
+        let ceil = if (total_weight * threshold) % 10 == 0 {
+            0
+        } else {
+            1
+        };
+        u32::try_from((total_weight * threshold) / 10 + ceil).map_err(|_| {
+            ChainstateError::InvalidStacksBlock(
+                "Overflow when computing nakamoto block approval threshold".to_string(),
+            )
+        })
     }
 
     /// Make an "empty" header whose block data needs to be filled in.
-    /// This is used by the miner code.
+    /// This is used by the miner code. The block's timestamp is set here, at
+    /// the time of creation.
pub fn from_parent_empty( chain_length: u64, burn_spent: u64, consensus_hash: ConsensusHash, parent_block_id: StacksBlockId, + bitvec_len: u16, + parent_timestamp: u64, ) -> NakamotoBlockHeader { NakamotoBlockHeader { version: NAKAMOTO_BLOCK_VERSION, @@ -512,9 +905,11 @@ impl NakamotoBlockHeader { parent_block_id, tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: std::cmp::max(parent_timestamp, get_epoch_time_secs()), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + signer_signature: vec![], + pox_treatment: BitVec::ones(bitvec_len) + .expect("BUG: bitvec of length-1 failed to construct"), } } @@ -528,9 +923,10 @@ impl NakamotoBlockHeader { parent_block_id: StacksBlockId([0u8; 32]), tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: 0, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -544,9 +940,10 @@ impl NakamotoBlockHeader { parent_block_id: StacksBlockId(BOOT_BLOCK_HASH.0.clone()), tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } } @@ -716,13 +1113,21 @@ impl NakamotoBlock { } let Some(tc_payload) = self.try_get_tenure_change_payload() else { - warn!("Invalid block -- tx at index 0 is not a tenure tx",); + warn!("Invalid block -- tx at index 0 is not a tenure tx"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if tc_payload.cause != TenureChangeCause::Extended { // not a tenure-extend, and can't be valid since all other tenure-change types require // a coinbase (which is not present) - warn!("Invalid block -- tenure tx cause is not an extension"); + warn!("Invalid block -- tenure tx cause is not an extension"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -730,8 +1135,11 @@ impl NakamotoBlock { // discontinuous warn!( "Invalid block -- discontiguous"; - "previosu_tenure_end" => %tc_payload.previous_tenure_end, - "parent_block_id" => %self.header.parent_block_id + "previous_tenure_end" => %tc_payload.previous_tenure_end, + "parent_block_id" => %self.header.parent_block_id, + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -745,6 +1153,8 @@ impl NakamotoBlock { "tenure_consensus_hash" => %tc_payload.tenure_consensus_hash, "prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -800,14 
+1210,21 @@ impl NakamotoBlock { warn!( "Invalid block -- have {} coinbases and {} tenure txs", coinbase_positions.len(), - tenure_change_positions.len() + tenure_change_positions.len(); + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { // coinbase unaccompanied by a tenure change - warn!("Invalid block -- have coinbase without tenure change"); + warn!("Invalid block -- have coinbase without tenure change"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -818,7 +1235,10 @@ impl NakamotoBlock { // wrong position warn!( "Invalid block -- tenure change positions = {:?}, expected [0]", - &tenure_change_positions, + &tenure_change_positions; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -827,13 +1247,21 @@ impl NakamotoBlock { let TransactionPayload::TenureChange(tc_payload) = &self.txs[0].payload else { // this transaction is not a tenure change // (should be unreachable) - warn!("Invalid block -- first transaction is not a tenure change"); + warn!("Invalid block -- first transaction is not a tenure change"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if tc_payload.cause.expects_sortition() { // not valid - warn!("Invalid block -- no coinbase, but tenure change expects sortition"); + warn!("Invalid block -- no coinbase, but tenure change expects sortition"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -847,24 +1275,39 @@ impl NakamotoBlock { if coinbase_positions[0] != coinbase_idx && tenure_change_positions[0] != tc_idx { // invalid -- expect exactly one sortition-induced tenure change and exactly one coinbase expected, // and the tenure change must be the first transaction and the coinbase must be the second transaction - warn!("Invalid block -- coinbase and/or tenure change txs are in the wrong position -- ({:?}, {:?}) != [{}], [{}]", &coinbase_positions, &tenure_change_positions, coinbase_idx, tc_idx); + warn!("Invalid block -- coinbase and/or tenure change txs are in the wrong position -- ({:?}, {:?}) != [{}], [{}]", &coinbase_positions, &tenure_change_positions, coinbase_idx, tc_idx; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } let Some(tc_payload) = self.try_get_tenure_change_payload() else { - warn!("Invalid block -- tx at index 0 is not a tenure tx",); + warn!("Invalid block -- tx at index 0 is not a tenure tx"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if !tc_payload.cause.expects_sortition() { // the only tenure change allowed in a block with a coinbase is a sortition-triggered // tenure change - warn!("Invalid block -- tenure change does not expect a sortition"); + warn!("Invalid block -- tenure change does not expect a 
sortition"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } if tc_payload.previous_tenure_end != self.header.parent_block_id { // discontinuous warn!( "Invalid block -- discontiguous -- {} != {}", - &tc_payload.previous_tenure_end, &self.header.parent_block_id + &tc_payload.previous_tenure_end, &self.header.parent_block_id; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -875,13 +1318,20 @@ impl NakamotoBlock { // this transaction is not a coinbase (but this should be unreachable) warn!( "Invalid block -- tx index {} is not a coinbase", - coinbase_idx + coinbase_idx; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); }; if vrf_proof_opt.is_none() { // not a Nakamoto coinbase - warn!("Invalid block -- no VRF proof in coinbase"); + warn!("Invalid block -- no VRF proof in coinbase"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -890,23 +1340,27 @@ impl NakamotoBlock { /// Verify that the VRF seed of this block's block-commit is the hash of the parent tenure's /// VRF seed. - pub fn validate_vrf_seed( + pub fn validate_vrf_seed( &self, sortdb_conn: &Connection, - chainstate_conn: &Connection, + chainstate_conn: &mut SDBI, block_commit: &LeaderBlockCommitOp, ) -> Result<(), ChainstateError> { // the block-commit from the miner who created this coinbase must have a VRF seed that // is the hash of the parent tenure's VRF proof. + // Do the query relative to the parent block ID, since this block may not be processed yet. 
let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( chainstate_conn, + &self.header.parent_block_id, sortdb_conn, &self.header.consensus_hash, &block_commit.txid, )?; if !block_commit.new_seed.is_from_proof(&parent_vrf_proof) { warn!("Invalid Nakamoto block-commit: seed does not match parent VRF proof"; - "block_id" => %self.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.block_id(), "commit_seed" => %block_commit.new_seed, "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof), "parent_vrf_proof" => %parent_vrf_proof.to_hex(), @@ -928,8 +1382,9 @@ impl NakamotoBlock { let recovered_miner_pubk = self.header.recover_miner_pk().ok_or_else(|| { warn!( "Nakamoto Stacks block downloaded with unrecoverable miner public key"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); })?; @@ -947,8 +1402,9 @@ impl NakamotoBlock { if &recovered_miner_hash160 != miner_pubkey_hash160 { warn!( "Nakamoto Stacks block signature mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(ChainstateError::InvalidStacksBlock( "Invalid miner signature".into(), @@ -975,8 +1431,9 @@ impl NakamotoBlock { if tc_payload.pubkey_hash != recovered_miner_hash160 { warn!( "Invalid tenure-change transaction -- bad miner pubkey hash160"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), "pubkey_hash" => %tc_payload.pubkey_hash, "recovered_miner_hash160" => %recovered_miner_hash160 ); @@ -990,9 +1447,9 @@ impl NakamotoBlock { if tc_payload.tenure_consensus_hash != self.header.consensus_hash { warn!( "Invalid tenure-change transaction -- bad consensus hash"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), "tc_payload.tenure_consensus_hash" => %tc_payload.tenure_consensus_hash ); return Err(ChainstateError::InvalidStacksBlock( @@ -1038,9 +1495,11 @@ impl NakamotoBlock { if !valid { warn!("Invalid Nakamoto block: leader VRF key did not produce a valid proof"; - "block_id" => %self.block_id(), - "leader_public_key" => %leader_vrf_key.to_hex(), - "sortition_hash" => %sortition_hash + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), + "leader_public_key" => %leader_vrf_key.to_hex(), + "sortition_hash" => %sortition_hash ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: leader VRF key did not produce a valid proof".into(), @@ -1075,8 +1534,10 @@ impl NakamotoBlock { // this block's consensus hash must match the sortition that selected it if tenure_burn_chain_tip.consensus_hash != 
self.header.consensus_hash {
             warn!("Invalid Nakamoto block: consensus hash does not match sortition";
-                "consensus_hash" => %self.header.consensus_hash,
-                "sortition.consensus_hash" => %tenure_burn_chain_tip.consensus_hash
+                "sortition.consensus_hash" => %tenure_burn_chain_tip.consensus_hash,
+                "consensus_hash" => %self.header.consensus_hash,
+                "stacks_block_hash" => %self.header.block_hash(),
+                "stacks_block_id" => %self.header.block_id(),
             );
             return Err(ChainstateError::InvalidStacksBlock(
                 "Invalid Nakamoto block: invalid consensus hash".into(),
@@ -1087,8 +1548,11 @@
         if let Some(expected_burn) = expected_burn {
             if self.header.burn_spent != expected_burn {
                 warn!("Invalid Nakamoto block header: invalid total burns";
-                    "header.burn_spent" => self.header.burn_spent,
-                    "expected_burn" => expected_burn,
+                    "header.burn_spent" => self.header.burn_spent,
+                    "expected_burn" => expected_burn,
+                    "consensus_hash" => %self.header.consensus_hash,
+                    "stacks_block_hash" => %self.header.block_hash(),
+                    "stacks_block_id" => %self.header.block_id()
                 );
                 return Err(ChainstateError::InvalidStacksBlock(
                     "Invalid Nakamoto block: invalid total burns".into(),
@@ -1163,7 +1627,11 @@
             }
         } else if valid_tenure_start.is_err() {
             // bad tenure change
-            warn!("Not a well-formed tenure-start block");
+            warn!("Not a well-formed tenure-start block";
+                "consensus_hash" => %self.header.consensus_hash,
+                "stacks_block_hash" => %self.header.block_hash(),
+                "stacks_block_id" => %self.header.block_id()
+            );
             return false;
         }
         let valid_tenure_extend = self.is_wellformed_tenure_extend_block();
@@ -1173,7 +1641,11 @@
             }
         } else if valid_tenure_extend.is_err() {
             // bad tenure extend
-            warn!("Not a well-formed tenure-extend block");
+            warn!("Not a well-formed tenure-extend block";
+                "consensus_hash" => %self.header.consensus_hash,
+                "stacks_block_hash" => %self.header.block_hash(),
+                "stacks_block_id" => %self.header.block_id()
+            );
             return false;
         }
         if !StacksBlock::validate_transactions_static_epoch(&self.txs, epoch_id) {
@@ -1264,16 +1736,23 @@ impl NakamotoChainState {
     /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the
     /// receipt.  Otherwise, it returns Ok(None).
     ///
+    /// `canonical_sortition_tip` is a pointer to the current canonical sortition tip.
+    /// It is used to store block-processed information in the sortition DB.
+    ///
     /// It returns Err(..) on DB error, or if the child block does not connect to the parent.
     /// The caller should keep calling this until it gets Ok(None)
     pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>(
         stacks_chain_state: &mut StacksChainState,
-        sort_tx: &mut SortitionHandleTx,
+        sort_db: &mut SortitionDB,
+        canonical_sortition_tip: &SortitionId,
         dispatcher_opt: Option<&'a T>,
     ) -> Result<Option<StacksEpochReceipt>, ChainstateError> {
+        #[cfg(test)]
+        test_stall::stall_block_processing();
+
         let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db();
         let Some((next_ready_block, block_size)) =
-            nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db(), sort_tx)?
+            nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db())?
         else {
             // no more blocks
             test_debug!("No more Nakamoto blocks to process");
@@ -1284,7 +1763,7 @@ impl NakamotoChainState {
 
         // find corresponding snapshot
         let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus(
-            sort_tx,
+            sort_db.conn(),
             &next_ready_block.header.consensus_hash,
         )?
         .unwrap_or_else(|| {
@@ -1297,10 +1776,39 @@ impl NakamotoChainState {
 
         debug!("Process staging Nakamoto block";
             "consensus_hash" => %next_ready_block.header.consensus_hash,
-            "block_hash" => %next_ready_block.header.block_hash(),
+            "stacks_block_hash" => %next_ready_block.header.block_hash(),
+            "stacks_block_id" => %next_ready_block.header.block_id(),
            "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash
        );
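The reward set that judges a block is the one elected for the cycle in which the block's tenure won its sortition, so the first step below maps the election height to a cycle. A toy sketch of that height-to-cycle arithmetic (the cycle length and heights are made up; the real math lives in `PoxConstants::block_height_to_reward_cycle`):

fn block_height_to_reward_cycle(first_block_height: u64, cycle_length: u64, height: u64) -> Option<u64> {
    // heights before the first burn block belong to no cycle
    height.checked_sub(first_block_height).map(|h| h / cycle_length)
}

fn main() {
    let first = 100;
    let len = 10;
    // a tenure elected at burn height 137 falls in cycle (137 - 100) / 10 = 3
    assert_eq!(block_height_to_reward_cycle(first, len, 137), Some(3));
    // an election height below `first_block_height` is an error upstream
    assert_eq!(block_height_to_reward_cycle(first, len, 99), None);
}
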
+        let elected_height = sort_db
+            .get_consensus_hash_height(&next_ready_block.header.consensus_hash)?
+            .ok_or_else(|| ChainstateError::NoSuchBlockError)?;
+        let elected_in_cycle = sort_db
+            .pox_constants
+            .block_height_to_reward_cycle(sort_db.first_block_height, elected_height)
+            .ok_or_else(|| {
+                ChainstateError::InvalidStacksBlock(
+                    "Elected in block height before first_block_height".into(),
+                )
+            })?;
+        let active_reward_set = OnChainRewardSetProvider::<DummyEventDispatcher>(None).read_reward_set_nakamoto_of_cycle(
+            elected_in_cycle,
+            stacks_chain_state,
+            sort_db,
+            &next_ready_block.header.parent_block_id,
+            true,
+        ).map_err(|e| {
+            warn!(
+                "Cannot process Nakamoto block: could not load reward set that elected the block";
+                "err" => ?e,
+                "consensus_hash" => %next_ready_block.header.consensus_hash,
+                "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                "stacks_block_id" => %next_ready_block.header.block_id(),
+                "parent_block_id" => %next_ready_block.header.parent_block_id,
+            );
+            ChainstateError::NoSuchBlockError
+        })?;
         let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?;
 
         // find parent header
@@ -1310,7 +1818,8 @@ impl NakamotoChainState {
             // no parent; cannot process yet
             debug!("Cannot process Nakamoto block: missing parent header";
                 "consensus_hash" => %next_ready_block.header.consensus_hash,
-                "block_hash" => %next_ready_block.header.block_hash(),
+                "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                "stacks_block_id" => %next_ready_block.header.block_id(),
                 "parent_block_id" => %next_ready_block.header.parent_block_id
             );
             return Ok(None);
@@ -1327,7 +1836,10 @@ impl NakamotoChainState {
             let msg = "Discontinuous Nakamoto Stacks block";
             warn!("{}", &msg;
                 "child parent_block_id" => %next_ready_block.header.parent_block_id,
-                "expected parent_block_id" => %parent_block_id
+                "expected parent_block_id" => %parent_block_id,
+                "consensus_hash" => %next_ready_block.header.consensus_hash,
+                "stacks_block_hash" => %next_ready_block.header.block_hash(),
+                "stacks_block_id" => %next_ready_block.header.block_id()
             );
             let staging_block_tx = stacks_chain_state.staging_db_tx_begin()?;
             staging_block_tx.set_block_orphaned(&block_id)?;
@@ -1335,6 +1847,85 @@ impl NakamotoChainState {
             return Err(ChainstateError::InvalidStacksBlock(msg.into()));
         }
 
+        // set the sortition handle's pointer to the block's burnchain view.
+        // this is either:
+        // (1) set by the tenure change tx if one exists
+        // (2) the same as parent block id
+
+        let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() {
+            if let Some(ref parent_burn_view) = parent_header_info.burn_view {
+                // check that the tenure_change's burn view descends from the parent
+                let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus(
+                    sort_db.conn(),
+                    parent_burn_view,
+                )?
+ .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } + tenure_change.burn_view_consensus_hash + } else { + parent_header_info.burn_view.clone().ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? + }; + let Some(burnchain_view_sn) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? + else { + // This should be checked already during block acceptance and parent block processing + // - The check for expected burns returns `NoSuchBlockError` if the burnchain view + // could not be found for a block with a tenure tx. + // We error here anyways, but the check during block acceptance makes sure that the staging + // db doesn't get into a situation where it continuously tries to retry such a block (because + // such a block shouldn't land in the staging db). 
+ warn!( + "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "burn_view_consensus_hash" => %burnchain_view, + ); + return Ok(None); + }; + // find commit and sortition burns if this is a tenure-start block let Ok(new_tenure) = next_ready_block.is_wellformed_tenure_start_block() else { return Err(ChainstateError::InvalidStacksBlock( @@ -1344,24 +1935,22 @@ impl NakamotoChainState { let (commit_burn, sortition_burn) = if new_tenure { // find block-commit to get commit-burn - let block_commit = sort_tx - .get_block_commit( - &next_ready_block_snapshot.winning_block_txid, - &next_ready_block_snapshot.sortition_id, - )? - .expect("FATAL: no block-commit for tenure-start block"); + let block_commit = SortitionDB::get_block_commit( + sort_db.conn(), + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? + .expect("FATAL: no block-commit for tenure-start block"); - let sort_burn = SortitionDB::get_block_burn_amount( - sort_tx.deref().deref(), - &next_ready_block_snapshot, - )?; + let sort_burn = + SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; (block_commit.burn_fee, sort_burn) } else { (0, 0) }; // attach the block to the chain state and calculate the next chain tip. - let pox_constants = sort_tx.context.pox_constants.clone(); + let pox_constants = sort_db.pox_constants.clone(); // NOTE: because block status is updated in a separate transaction, we need `chainstate_tx` // and `clarity_instance` to go out of scope before we can issue the it (since we need a @@ -1372,10 +1961,13 @@ impl NakamotoChainState { // though it will always be None), which gets the borrow-checker to believe that it's safe // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so // simply commit the block before beginning the second transaction to mark it processed. + + let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); let (ok_opt, err_opt) = match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, - sort_tx, + &mut burn_view_handle, + &burnchain_view, &pox_constants, &parent_header_info, &next_ready_block_snapshot.burn_header_hash, @@ -1388,6 +1980,7 @@ impl NakamotoChainState { block_size, commit_burn, sortition_burn, + &active_reward_set, ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), @@ -1402,7 +1995,8 @@ impl NakamotoChainState { "Failed to append {}/{}: {:?}", &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash(), - &e + &e; + "stacks_block_id" => %next_ready_block.header.block_id() ); // as a separate transaction, mark this block as processed and orphaned. @@ -1426,21 +2020,14 @@ impl NakamotoChainState { next_ready_block.header.consensus_hash ); - // set stacks block accepted - sort_tx.set_stacks_block_accepted( - &next_ready_block.header.consensus_hash, - &next_ready_block.header.block_hash(), - next_ready_block.header.chain_length, - )?; - // this will panic if the Clarity commit fails. clarity_commit.commit(); chainstate_tx.commit() - .unwrap_or_else(|e| { - error!("Failed to commit chainstate transaction after committing Clarity block. 
The chainstate database is now corrupted."; "error" => ?e);
-                panic!()
-            });
+            .unwrap_or_else(|e| {
+                error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted.";
+                       "error" => ?e);
+                panic!()
+            });
 
         // as a separate transaction, mark this block as processed.
         // This is done separately so that the staging blocks DB, which receives writes
@@ -1450,7 +2037,23 @@ impl NakamotoChainState {
         // succeeds, since *we have already processed* the block.
         Self::infallible_set_block_processed(stacks_chain_state, &block_id);
 
-        let signer_bitvec = (&next_ready_block).header.signer_bitvec.clone();
+        let signer_bitvec = (&next_ready_block).header.pox_treatment.clone();
+
+        // set stacks block accepted
+        let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?;
+        sort_tx.set_stacks_block_accepted(
+            &next_ready_block.header.consensus_hash,
+            &next_ready_block.header.block_hash(),
+            next_ready_block.header.chain_length,
+        )?;
+
+        sort_tx
+            .commit()
+            .unwrap_or_else(|e| {
+                error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted.";
+                       "error" => ?e);
+                panic!()
+            });
 
         // announce the block, if we're connected to an event dispatcher
         if let Some(dispatcher) = dispatcher_opt {
@@ -1494,7 +2097,7 @@ impl NakamotoChainState {
     /// however, will flag a block as invalid in this case, because the parent must be available in
     /// order to process a block.
     pub(crate) fn get_expected_burns<SH: SortitionHandle>(
-        sort_handle: &mut SH,
+        sort_handle: &SH,
         chainstate_conn: &Connection,
         block: &NakamotoBlock,
     ) -> Result<Option<u64>, ChainstateError> {
@@ -1598,7 +2201,7 @@ impl NakamotoChainState {
             warn!(
                 "Invalid Nakamoto block, could not validate on burnchain";
                 "consensus_hash" => %consensus_hash,
-                "block_hash" => %block_hash,
+                "stacks_block_hash" => %block_hash,
                "error" => ?e
            );
 
@@ -1628,56 +2231,67 @@ impl NakamotoChainState {
         Ok(())
     }
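The ordering above is deliberate: the chainstate/Clarity transaction commits first, then the staging DB marks the block processed, and only then does the sortition DB record acceptance, so a crash between steps leaves only idempotent replay work. A schematic sketch of that discipline (all three stores are stand-ins, not the real DB handles):

// stand-ins for the three databases involved
struct Db(&'static str, bool);

impl Db {
    fn commit(&mut self) {
        // a failure after an earlier store has committed is treated as fatal upstream
        self.1 = true;
        println!("committed {}", self.0);
    }
}

fn main() {
    let mut chainstate = Db("chainstate + clarity", false);
    let mut staging = Db("staging: processed flag", false);
    let mut sortition = Db("sortition: accepted flag", false);

    // commit order matters; the later two steps are safe to replay after a crash
    chainstate.commit();
    staging.commit();
    sortition.commit();

    assert!(chainstate.1 && staging.1 && sortition.1);
}
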
 
-    /// Insert a Nakamoto block into the staging blocks DB
-    pub(crate) fn store_block(
+    /// Insert a Nakamoto block into the staging blocks DB.
+    /// We only store a block in the following cases:
+    ///
+    /// * No block with this block's sighash exists in the DB
+    /// * A block with this block's sighash exists, AND
+    ///    * this block represents more signing power
+    ///
+    /// If neither of the above is true, then this is a no-op.
+    pub(crate) fn store_block_if_better(
         staging_db_tx: &NakamotoStagingBlocksTx,
-        block: NakamotoBlock,
+        block: &NakamotoBlock,
         burn_attachable: bool,
-    ) -> Result<(), ChainstateError> {
+        signing_weight: u32,
+        obtain_method: NakamotoBlockObtainMethod,
+    ) -> Result<bool, ChainstateError> {
         let block_id = block.block_id();
-        let Ok(tenure_start) = block.is_wellformed_tenure_start_block() else {
-            return Err(ChainstateError::InvalidStacksBlock(
-                "Tried to store a tenure-start block that is not well-formed".into(),
-            ));
-        };
+        let block_hash = block.header.block_hash();
 
-        staging_db_tx.execute(
-            "INSERT INTO nakamoto_staging_blocks (
-                     block_hash,
-                     consensus_hash,
-                     parent_block_id,
-                     is_tenure_start,
-                     burn_attachable,
-                     orphaned,
-                     processed,
-
-                     height,
-                     index_block_hash,
-                     download_time,
-                     arrival_time,
-                     processed_time,
-                     data
-            ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)",
-            params![
-                &block.header.block_hash(),
-                &block.header.consensus_hash,
-                &block.header.parent_block_id,
-                &tenure_start,
-                if burn_attachable { 1 } else { 0 },
-                0,
-                0,
-                u64_to_sql(block.header.chain_length)?,
-                &block_id,
-                0,
-                0,
-                0,
-                block.serialize_to_vec(),
-            ],
-        )?;
-        if burn_attachable {
-            staging_db_tx.set_burn_block_processed(&block.header.consensus_hash)?;
+        // case 1 -- no block with this sighash exists.
+        if staging_db_tx.try_store_block_with_new_signer_sighash(
+            block,
+            burn_attachable,
+            signing_weight,
+            obtain_method,
+        )? {
+            debug!("Stored block with new sighash";
+                   "block_id" => %block_id,
+                   "block_hash" => %block_hash);
+            return Ok(true);
         }
-        Ok(())
+
+        // case 2 -- the block exists.  Consider replacing it, but only if its
+        // signing weight is higher.
+        let (existing_block_id, _processed, orphaned, existing_signing_weight) = staging_db_tx.conn().get_block_processed_and_signed_weight(&block.header.consensus_hash, &block_hash)?
+            .ok_or_else(|| {
+                // this should be unreachable -- there's no record of this block
+                error!("Could not store block {} ({}) with block hash {} -- no record of its processed status or signing weight!", &block_id, &block.header.consensus_hash, &block_hash);
+                ChainstateError::NoSuchBlockError
+            })?;
+
+        if orphaned {
+            // nothing to do
+            debug!("Will not store alternative copy of block {} ({}) with block hash {}, since a block with the same block hash was orphaned", &block_id, &block.header.consensus_hash, &block_hash);
+            return Ok(false);
+        }
+
+        let ret = if existing_signing_weight < signing_weight {
+            staging_db_tx.replace_block(block, signing_weight, obtain_method)?;
+            debug!("Replaced block";
+                   "existing_block_id" => %existing_block_id,
+                   "block_id" => %block_id,
+                   "block_hash" => %block_hash,
+                   "existing_signing_weight" => existing_signing_weight,
+                   "signing_weight" => signing_weight);
+            true
+        } else {
+            debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash);
+            false
+        };
+
+        return Ok(ret);
    }
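The decision rule above boils down to a small predicate: never resurrect an orphan, and replace a stored copy only for strictly more signing weight. A distilled sketch (the `Entry` row type is hypothetical):

struct Entry {
    orphaned: bool,
    signing_weight: u32,
}

/// Returns true when an incoming copy of an already-known block (same signer
/// sighash) should replace the stored one.
fn should_replace(existing: &Entry, incoming_weight: u32) -> bool {
    !existing.orphaned && existing.signing_weight < incoming_weight
}

fn main() {
    let stored = Entry { orphaned: false, signing_weight: 70 };
    assert!(should_replace(&stored, 85)); // more signatures seen later
    assert!(!should_replace(&stored, 70)); // ties keep the first copy
    let orphan = Entry { orphaned: true, signing_weight: 10 };
    assert!(!should_replace(&orphan, 95)); // orphans stay orphaned
}
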
 
     /// Accept a Nakamoto block into the staging blocks DB.
@@ -1690,11 +2304,12 @@
     /// Returns true if we stored the block; false if not.
     pub fn accept_block(
         config: &ChainstateConfig,
-        block: NakamotoBlock,
+        block: &NakamotoBlock,
         db_handle: &mut SortitionHandleConn,
         staging_db_tx: &NakamotoStagingBlocksTx,
         headers_conn: &Connection,
-        aggregate_public_key: &Point,
+        reward_set: RewardSet,
+        obtain_method: NakamotoBlockObtainMethod,
     ) -> Result<bool, ChainstateError> {
         test_debug!("Consider Nakamoto block {}", &block.block_id());
         // do nothing if we already have this block
@@ -1723,119 +2338,53 @@
 
         // it's okay if this fails because we might not have the parent block yet.  It will be
         // checked on `::append_block()`
-        let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, &block)?;
+        let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, block)?;
 
         // this block must be consistent with its miner's leader-key and block-commit, and must
         // contain only transactions that are valid in this epoch.
         if let Err(e) = Self::validate_nakamoto_block_burnchain(
             db_handle,
             expected_burn_opt,
-            &block,
+            block,
             config.mainnet,
             config.chain_id,
         ) {
             warn!("Unacceptable Nakamoto block; will not store";
-                "block_id" => %block.block_id(),
+                "stacks_block_id" => %block.block_id(),
                "error" => ?e
            );
            return Ok(false);
        };
-        let schnorr_signature = &block.header.signer_signature.0;
-        if !db_handle.expects_signer_signature(
-            &block.header.consensus_hash,
-            schnorr_signature,
-            &block.header.signer_signature_hash().0,
-            aggregate_public_key,
-        )? {
-            let msg = format!(
-                "Received block, but the signer signature does not match the active stacking cycle"
-            );
-            warn!("{}", msg; "aggregate_key" => %aggregate_public_key);
-            return Err(ChainstateError::InvalidStacksBlock(msg));
-        }
+        let signing_weight = match block.header.verify_signer_signatures(&reward_set) {
+            Ok(x) => x,
+            Err(e) => {
+                warn!("Received block, but the signer signatures are invalid";
+                    "block_id" => %block.block_id(),
+                    "error" => ?e,
+                );
+                return Err(e);
+            }
+        };
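A compact model of the validation loop behind `verify_signer_signatures`, with public-key recovery mocked out: each signature must resolve to a reward-set member, indices must strictly increase (which also rejects duplicates), and the summed weight must reach the 70% threshold computed as in `compute_voting_weight_threshold`. All inputs here are invented for illustration:

fn threshold(total_weight: u64) -> u64 {
    // ceil(total_weight * 7 / 10), i.e. the 70% approval threshold
    (total_weight * 7).div_ceil(10)
}

/// `recovered` holds, per signature, the reward-set index and weight of the
/// signer its public key recovered to (None if recovery/membership failed).
fn verify(recovered: &[Option<(usize, u64)>], total_weight: u64) -> Result<u64, String> {
    let mut signed = 0u64;
    let mut last_index: Option<usize> = None;
    for entry in recovered.iter().copied() {
        let (index, weight) = entry.ok_or_else(|| "unrecoverable or unknown signer".to_string())?;
        if last_index.is_some_and(|last| last >= index) {
            return Err("signatures are out of order".to_string());
        }
        last_index = Some(index);
        signed += weight;
    }
    if signed < threshold(total_weight) {
        return Err(format!("not enough weight: {signed} < {}", threshold(total_weight)));
    }
    Ok(signed)
}

fn main() {
    // 3 signers of weight 40/35/25; the first two reach 75 >= 70
    assert_eq!(verify(&[Some((0, 40)), Some((1, 35))], 100), Ok(75));
    // a duplicate signature does not advance the index, so it is rejected
    assert!(verify(&[Some((0, 40)), Some((0, 40))], 100).is_err());
    // 40 + 25 = 65 misses the 70-weight threshold
    assert!(verify(&[Some((0, 40)), Some((2, 25))], 100).is_err());
}
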
 
         // if we pass all the tests, then along the way, we will have verified (in
         // Self::validate_nakamoto_block_burnchain) that the consensus hash of this block is on the
         // same sortition history as `db_handle` (and thus it must be burn_attachable)
         let burn_attachable = true;
 
-        let _block_id = block.block_id();
-        Self::store_block(staging_db_tx, block, burn_attachable)?;
-        test_debug!("Stored Nakamoto block {}", &_block_id);
-        Ok(true)
-    }
-
-    /// Get the aggregate public key for the given block from the signers-voting contract
-    pub(crate) fn load_aggregate_public_key<SH: SortitionHandle>(
-        sortdb: &SortitionDB,
-        sort_handle: &SH,
-        chainstate: &mut StacksChainState,
-        for_burn_block_height: u64,
-        at_block_id: &StacksBlockId,
-        warn_if_not_found: bool,
-    ) -> Result<Point, ChainstateError> {
-        // Get the current reward cycle
-        let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle(
-            sort_handle.first_burn_block_height(),
-            for_burn_block_height,
-        ) else {
-            // This should be unreachable, but we'll return an error just in case.
-            let msg = format!(
-                "BUG: Failed to determine reward cycle of burn block height: {}.",
-                for_burn_block_height
-            );
-            warn!("{msg}");
-            return Err(ChainstateError::InvalidStacksBlock(msg));
-        };
-
-        test_debug!(
-            "get-approved-aggregate-key at block {}, cycle {}",
-            at_block_id,
-            rc
-        );
-        match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? {
-            Some(key) => Ok(key),
-            None => {
-                // this can happen for a whole host of reasons
-                if warn_if_not_found {
-                    warn!(
-                        "Failed to get aggregate public key";
-                        "block_id" => %at_block_id,
-                        "reward_cycle" => rc,
-                    );
-                }
-                Err(ChainstateError::InvalidStacksBlock(
-                    "Failed to get aggregate public key".into(),
-                ))
-            }
-        }
-    }
-
-    /// Get the aggregate public key for a block.
-    /// TODO: The block at which the aggregate public key is queried needs to be better defined.
-    /// See https://github.com/stacks-network/stacks-core/issues/4109
-    pub fn get_aggregate_public_key<SH: SortitionHandle>(
-        chainstate: &mut StacksChainState,
-        sortdb: &SortitionDB,
-        sort_handle: &SH,
-        block: &NakamotoBlock,
-    ) -> Result<Point, ChainstateError> {
-        let block_sn =
-            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)?
-                .ok_or(ChainstateError::DBError(DBError::NotFoundError))?;
-        let aggregate_key_block_header =
-            Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap();
-
-        let aggregate_public_key = Self::load_aggregate_public_key(
-            sortdb,
-            sort_handle,
-            chainstate,
-            block_sn.block_height,
-            &aggregate_key_block_header.index_block_hash(),
-            true,
+        let ret = Self::store_block_if_better(
+            staging_db_tx,
+            block,
+            burn_attachable,
+            signing_weight,
+            obtain_method,
         )?;
-        Ok(aggregate_public_key)
+        if ret {
+            test_debug!("Stored Nakamoto block {}", &block.block_id());
+        } else {
+            test_debug!("Did NOT store Nakamoto block {}", &block.block_id());
+        }
+        Ok(ret)
     }
 
     /// Return the total ExecutionCost consumed during the tenure up to and including
@@ -1875,52 +2424,33 @@
         tip_index_hash: &StacksBlockId,
         coinbase_height: u64,
     ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
-        // query for block header info at the tenure-height, then check if in fork
-        let qry = "SELECT DISTINCT tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE coinbase_height = ?1";
-
-        let candidate_chs: Vec<ConsensusHash> =
-            query_rows(tx.tx(), qry, &[u64_to_sql(coinbase_height)?])?;
-
-        if candidate_chs.len() == 0 {
-            // no nakamoto_tenures at that tenure height, check if there's a stack block header where
-            // block_height = coinbase_height
-            let Some(ancestor_at_height) = tx
-                .get_ancestor_block_hash(coinbase_height, tip_index_hash)?
-                .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor))
-                .transpose()?
-                .flatten()
-            else {
-                warn!("No such epoch2 ancestor";
-                    "coinbase_height" => coinbase_height,
-                    "tip_index_hash" => %tip_index_hash,
-                );
-                return Ok(None);
-            };
-            // only return if it is an epoch-2 block, because that's
-            // the only case where block_height can be interpreted as
-            // tenure height.
-            if ancestor_at_height.is_epoch_2_block() {
-                return Ok(Some(ancestor_at_height));
-            } else {
-                return Ok(None);
-            }
+        // nakamoto block?
+        if let Some(block_id) =
+            tx.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)?
+        {
+            return Self::get_block_header_nakamoto(tx.sqlite(), &block_id);
         }
 
-        for candidate_ch in candidate_chs.into_iter() {
-            let Some(candidate) = Self::get_block_header_by_consensus_hash(tx, &candidate_ch)?
-            else {
-                continue;
-            };
-            let Ok(Some(ancestor_at_height)) =
-                tx.get_ancestor_block_hash(candidate.stacks_block_height, tip_index_hash)
-            else {
-                // if there's an error or no result, this candidate doesn't match, so try next candidate
-                continue;
-            };
-            if ancestor_at_height == candidate.index_block_hash() {
-                return Ok(Some(candidate));
-            }
+        // epoch2 block?
+        let Some(ancestor_at_height) = tx
+            .get_ancestor_block_hash(coinbase_height, tip_index_hash)?
+            .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor))
+            .transpose()?
+            .flatten()
+        else {
+            warn!("No such epoch2 ancestor";
+                "coinbase_height" => coinbase_height,
+                "tip_index_hash" => %tip_index_hash,
+            );
+            return Ok(None);
+        };
+        // only return if it is an epoch-2 block, because that's
+        // the only case where block_height can be interpreted as
+        // tenure height.
+        if ancestor_at_height.is_epoch_2_block() {
+            return Ok(Some(ancestor_at_height));
        }
+        Ok(None)
    }
@@ -1991,7 +2521,22 @@
         Ok(result.is_some())
     }
 
+    /// Does an epoch2 block header exist?
+    pub fn has_block_header_epoch2(
+        chainstate_conn: &Connection,
+        index_block_hash: &StacksBlockId,
+    ) -> Result<bool, ChainstateError> {
+        let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1";
+        let result: Option<i64> =
+            query_row_panic(chainstate_conn, sql, &[&index_block_hash], || {
+                "FATAL: multiple rows for the same block hash".to_string()
+            })?;
+
+        Ok(result.is_some())
+    }
+
     /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto)
+    /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation).
+    /// This function returns the latest data known to the node, which may not match what was
+    /// known at the time of original block assembly.
     pub fn get_canonical_block_header(
         chainstate_conn: &Connection,
         sortdb: &SortitionDB,
@@ -2005,55 +2550,109 @@
     }
 
     /// Get the tenure-start block header of a given consensus hash.
-    /// It might be an epoch 2.x block header
-    pub fn get_block_header_by_consensus_hash(
-        chainstate_conn: &Connection,
+    /// For Nakamoto blocks, this is the first block in the tenure identified by the consensus
+    /// hash.
+    /// For epoch2 blocks, this is simply the block whose winning sortition happened in the
+    /// sortition identified by the consensus hash.
+    ///
+    /// `tip_block_id` is the chain tip from which to perform the query.
+    pub fn get_tenure_start_block_header<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
         consensus_hash: &ConsensusHash,
     ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
-        let nakamoto_header_info =
-            Self::get_nakamoto_tenure_start_block_header(chainstate_conn, consensus_hash)?;
-        if nakamoto_header_info.is_some() {
-            return Ok(nakamoto_header_info);
+        // nakamoto?
+        if let Some(hdr) = Self::get_nakamoto_tenure_start_block_header(
+            chainstate_conn,
+            tip_block_id,
+            consensus_hash,
+        )? {
+            return Ok(Some(hdr));
         }
 
-        // parent might be epoch 2
+        // epoch2?
         let epoch2_header_info = StacksChainState::get_stacks_block_header_info_by_consensus_hash(
-            chainstate_conn,
+            chainstate_conn.sqlite(),
            consensus_hash,
        )?;
        Ok(epoch2_header_info)
    }
 
+    /// Get the first block header in a Nakamoto tenure
+    pub fn get_nakamoto_tenure_start_block_header<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
+        consensus_hash: &ConsensusHash,
+    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
+        let Some(block_id) =
+            chainstate_conn.get_tenure_start_block_id(tip_block_id, consensus_hash)?
+        else {
+            return Ok(None);
+        };
+        Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id)
+    }
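The lookup pattern above is "Nakamoto first, epoch2 fallback": consult the MARF-indexed tenure-start key relative to the given tip, and only then the epoch2 header table. A minimal sketch with both lookups abstracted as closures (everything here is a stand-in):

struct Header(&'static str);

fn resolve_tenure_start(
    nakamoto_lookup: impl Fn() -> Option<Header>,
    epoch2_lookup: impl Fn() -> Option<Header>,
) -> Option<Header> {
    // a hit against the fork-aware MARF index wins
    nakamoto_lookup().or_else(epoch2_lookup)
}

fn main() {
    let hit = resolve_tenure_start(|| Some(Header("nakamoto")), || Some(Header("epoch2")));
    assert_eq!(hit.unwrap().0, "nakamoto");
    let fallback = resolve_tenure_start(|| None, || Some(Header("epoch2")));
    assert_eq!(fallback.unwrap().0, "epoch2");
}
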
+
+    /// Get the highest block in the given tenure.
+    /// Only works on Nakamoto blocks.
+    /// TODO: unit test
+    pub fn get_highest_block_header_in_tenure<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
+        consensus_hash: &ConsensusHash,
+    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
+        let Some(block_id) =
+            chainstate_conn.get_highest_block_id_in_tenure(tip_block_id, consensus_hash)?
+        else {
+            return Ok(None);
+        };
+        Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id)
+    }
+
     /// Get the VRF proof for a Stacks block.
-    /// This works for either Nakamoto or epoch 2.x
-    pub fn get_block_vrf_proof(
-        chainstate_conn: &Connection,
+    /// For Nakamoto blocks, this is the VRF proof contained in the coinbase of the tenure-start
+    /// block of the given tenure identified by the consensus hash.
+    ///
+    /// For epoch 2.x blocks, this is the VRF proof in the block header itself, whose sortition is
+    /// identified by the consensus hash.
+    pub fn get_block_vrf_proof<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
        consensus_hash: &ConsensusHash,
    ) -> Result<Option<VRFProof>, ChainstateError> {
-        let Some(start_header) = NakamotoChainState::get_block_header_by_consensus_hash(
+        let Some(start_header) = NakamotoChainState::get_tenure_start_block_header(
            chainstate_conn,
+            tip_block_id,
            consensus_hash,
        )?
        else {
+            warn!("No tenure-start block";
+                "consensus_hash" => %consensus_hash,
+                "tip_block_id" => %tip_block_id);
            return Ok(None);
        };
 
         let vrf_proof = match start_header.anchored_header {
             StacksBlockHeaderTypes::Epoch2(epoch2_header) => Some(epoch2_header.proof),
             StacksBlockHeaderTypes::Nakamoto(..) => {
-                NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate_conn, consensus_hash)?
+                NakamotoChainState::get_nakamoto_tenure_vrf_proof(
+                    chainstate_conn.sqlite(),
+                    &start_header.index_block_hash(),
+                )?
             }
         };
 
         Ok(vrf_proof)
     }
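The invariant behind `validate_vrf_seed` and `get_parent_vrf_proof`: a tenure's block-commit must carry a seed derived by hashing the parent tenure's VRF proof, chaining tenures together. A sketch of the check, using the `sha2` crate's SHA-512/256 as a stand-in for the real seed derivation:

use sha2::{Digest, Sha512_256};

fn seed_from_proof(parent_proof: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    out.copy_from_slice(&Sha512_256::digest(parent_proof));
    out
}

fn commit_seed_is_valid(commit_seed: &[u8; 32], parent_proof: &[u8]) -> bool {
    &seed_from_proof(parent_proof) == commit_seed
}

fn main() {
    let parent_proof = b"parent tenure VRF proof bytes";
    let seed = seed_from_proof(parent_proof);
    assert!(commit_seed_is_valid(&seed, parent_proof));
    // a commit built off a different parent proof fails the check
    assert!(!commit_seed_is_valid(&seed, b"forged proof"));
}
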
 
-    /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the block
+    /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the tenure
     /// identified by the given consensus hash.
-    /// The parent must already have been processed.
+    /// The parent tenure's tenure-start block must already have been processed.
     ///
-    /// `consensus_hash` identifies the child block.
-    /// `block_commit_txid` identifies the child block's tenure's block-commit tx
+    /// `tip_block_id` identifies the tip of the chain history to search. It can be the child
+    /// block's block ID, or any descendant.
+    /// `consensus_hash` identifies the child block's tenure.
+    /// `block_commit_txid` identifies the child block's tenure's block-commit tx, which in turn
+    /// contains the hash of the start-block of the tenure prior to the child's tenure.
     ///
     /// Returns the proof of this block's parent tenure on success.
     ///
@@ -2062,8 +2661,9 @@
     ///
     /// Returns NoSuchBlockError if the block header for `consensus_hash` does not exist, or if the
     /// parent block header info does not exist (i.e. the chainstate DB is missing something)
-    pub fn get_parent_vrf_proof(
-        chainstate_conn: &Connection,
+    pub fn get_parent_vrf_proof<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
         sortdb_conn: &Connection,
         consensus_hash: &ConsensusHash,
         block_commit_txid: &Txid,
     ) -> Result<VRFProof, ChainstateError> {
@@ -2088,10 +2688,12 @@
         )?;
 
         let parent_vrf_proof =
-            Self::get_block_vrf_proof(chainstate_conn, &parent_sn.consensus_hash)?
+            Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &parent_sn.consensus_hash)?
                .ok_or(ChainstateError::NoSuchBlockError)
                .map_err(|e| {
-                    warn!("Nakamoto block has no parent";
+                    warn!("Could not find parent VRF proof";
+                        "tip_block_id" => %tip_block_id,
+                        "parent consensus_hash" => %parent_sn.consensus_hash,
                        "block consensus_hash" => %consensus_hash);
                    e
                })?;
@@ -2110,7 +2712,7 @@
         block_hash: &BlockHeaderHash,
     ) -> Result<Option<(bool, bool)>, ChainstateError> {
         let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2";
-        let args: &[&dyn ToSql] = &[consensus_hash, block_hash];
+        let args = params![consensus_hash, block_hash];
         let Some((processed, orphaned)) = query_row_panic(&staging_blocks_conn, sql, args, || {
             "FATAL: multiple rows for the same consensus hash and block hash".to_string()
         })
@@ -2141,22 +2743,27 @@
     }
 
     /// Get the VRF proof for a Nakamoto block, if it exists.
+    /// This must be the tenure-start Nakamoto block ID
     /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no
-    /// Nakamoto block)
+    /// Nakamoto block, or because this isn't a tenure-start block)
     pub fn get_nakamoto_tenure_vrf_proof(
         chainstate_conn: &Connection,
-        consensus_hash: &ConsensusHash,
+        tenure_start_block_id: &StacksBlockId,
    ) -> Result<Option<VRFProof>, ChainstateError> {
-        let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1";
-        let args: &[&dyn ToSql] = &[consensus_hash];
+        let sql = r#"SELECT IFNULL(vrf_proof,"") FROM nakamoto_block_headers WHERE index_block_hash = ?1"#;
+        let args = params![tenure_start_block_id];
        let proof_bytes: Option<String> = query_row(chainstate_conn, sql, args)?;
        if let Some(bytes) = proof_bytes {
+            if bytes.len() == 0 {
+                // no VRF proof
+                return Ok(None);
+            }
            let proof = VRFProof::from_hex(&bytes)
                .ok_or(DBError::Corruption)
                .map_err(|e| {
                    warn!("Failed to load VRF proof: could not decode";
                        "vrf_proof" => %bytes,
-                        "consensus_hash" => %consensus_hash
+                        "tenure_start_block_id" => %tenure_start_block_id,
                    );
                    e
                })?;
@@ -2166,9 +2773,39 @@
         }
     }
 
-    /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof
-    fn check_block_commit_vrf_seed(
-        chainstate_conn: &Connection,
+    /// Return the coinbase height of `block` if it was a nakamoto block, or the
+    /// Stacks block height of `block` if it was an epoch-2 block
+    ///
+    /// In Stacks 2.x, the coinbase height and block height are the
+    /// same. A miner's tenure in Stacks 2.x is entirely encompassed
+    /// in the single Bitcoin-anchored Stacks block they produce, as
+    /// well as the microblock stream they append to it. But in Nakamoto,
+    /// the coinbase height and block height are decoupled.
+    pub fn get_coinbase_height<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        block: &StacksBlockId,
+    ) -> Result<Option<u64>, ChainstateError> {
+        // nakamoto header?
+        if let Some(hdr) = Self::get_block_header_nakamoto(chainstate_conn.sqlite(), block)? {
+            return Ok(chainstate_conn.get_coinbase_height(block, &hdr.consensus_hash)?);
+        }
+
+        // epoch2 header
+        let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?1";
+        let opt_height: Option<i64> = chainstate_conn
+            .sqlite()
+            .query_row(epoch_2_qry, &[block], |row| row.get(0))
+            .optional()?;
+        opt_height
+            .map(u64::try_from)
+            .transpose()
+            .map_err(|_| ChainstateError::DBError(DBError::ParseError))
+    }
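To make the decoupling described in `get_coinbase_height` concrete: coinbase height counts tenures, chain length counts blocks, and only in epoch 2.x do the two coincide. A tiny illustration with made-up tenure sizes:

struct Tenure {
    blocks: u64, // blocks produced during the tenure (always >= 1)
}

fn main() {
    let tenures = [Tenure { blocks: 1 }, Tenure { blocks: 12 }, Tenure { blocks: 7 }];
    let coinbase_height = tenures.len() as u64; // one coinbase per tenure
    let chain_length: u64 = tenures.iter().map(|t| t.blocks).sum();
    assert_eq!(coinbase_height, 3);
    assert_eq!(chain_length, 20); // block height runs well ahead of coinbase height
}
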
+
+    /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof.
+    /// Specifically, it must be the hash of the parent tenure's VRF proof.
+    pub(crate) fn check_block_commit_vrf_seed<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
         sortdb_conn: &Connection,
         block: &NakamotoBlock,
     ) -> Result<(), ChainstateError> {
@@ -2177,7 +2814,11 @@
             SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)?
                 .ok_or(ChainstateError::NoSuchBlockError)
                 .map_err(|e| {
-                    warn!("No block-commit for block"; "block_id" => %block.block_id());
+                    warn!("No block-commit for block";
+                        "consensus_hash" => %block.header.consensus_hash,
+                        "stacks_block_hash" => %block.header.block_hash(),
+                        "stacks_block_id" => %block.header.block_id()
+                    );
                    e
                })?;
 
             get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)?
                 .ok_or(ChainstateError::NoSuchBlockError)
                 .map_err(|e| {
-                    warn!("No block-commit for block"; "block_id" => %block.block_id());
+                    warn!("No block-commit for block";
+                        "consensus_hash" => %block.header.consensus_hash,
+                        "stacks_block_hash" => %block.header.block_hash(),
+                        "stacks_block_id" => %block.header.block_id()
+                    );
                    e
                })?;
 
+        // N.B. passing block.block_id() here means that we'll look into the parent tenure
         block.validate_vrf_seed(sortdb_conn, chainstate_conn, &block_commit)
     }
@@ -2204,6 +2850,7 @@
         block_cost: &ExecutionCost,
         total_tenure_cost: &ExecutionCost,
         tenure_changed: bool,
+        height_in_tenure: u32,
         tenure_tx_fees: u128,
     ) -> Result<(), ChainstateError> {
         assert_eq!(tip_info.stacks_block_height, header.chain_length,);
@@ -2229,54 +2876,83 @@
         let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex());
 
-        let args: &[&dyn ToSql] = &[
-            &u64_to_sql(*stacks_block_height)?,
-            &index_root,
-            &consensus_hash,
-            &burn_header_hash,
-            &burn_header_height,
-            &u64_to_sql(*burn_header_timestamp)?,
-            &block_size_str,
-            &HeaderTypeNames::Nakamoto,
-            &header.version,
-            &u64_to_sql(header.chain_length)?,
-            &u64_to_sql(header.burn_spent)?,
-            &header.miner_signature,
-            &header.signer_signature,
-            &header.tx_merkle_root,
-            &header.state_index_root,
-            &block_hash,
-            &index_block_hash,
+        let signer_signature = serde_json::to_string(&header.signer_signature).map_err(|_| {
+            ChainstateError::InvalidStacksBlock(format!(
+                "Failed to serialize signer signature for block {}",
+                block_hash
+            ))
+        })?;
+
+        let args = params![
+            u64_to_sql(*stacks_block_height)?,
+            index_root,
+            consensus_hash,
+            burn_header_hash,
+            burn_header_height,
+            u64_to_sql(*burn_header_timestamp)?,
+            block_size_str,
+            HeaderTypeNames::Nakamoto,
+            header.version,
+            u64_to_sql(header.chain_length)?,
+            u64_to_sql(header.burn_spent)?,
+            header.miner_signature,
+            signer_signature,
+            header.tx_merkle_root,
+            header.state_index_root,
+            u64_to_sql(header.timestamp)?,
+            block_hash,
+            index_block_hash,
             block_cost,
             total_tenure_cost,
             &tenure_tx_fees.to_string(),
             &header.parent_block_id,
             if tenure_changed { &1i64 } else { &0i64 },
             &vrf_proof_bytes.as_ref(),
-            &header.signer_bitvec,
+            &header.pox_treatment,
+            &height_in_tenure,
+            tip_info.burn_view.as_ref().ok_or_else(|| {
+                error!(
+                    "Attempted to store nakamoto block header information without burnchain view";
+                    "block_id" => %index_block_hash,
+                );
+                ChainstateError::DBError(DBError::Other(
+                    "Nakamoto block StacksHeaderInfo did not set burnchain view".into(),
+                ))
+            })?,
         ];
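The new `timestamp` column is an SQLite INTEGER, i.e. an i64, while the header field is a u64; hence the `u64_to_sql` range check on insert and the `try_into` on read in `FromRow`. A standalone sketch of that round-trip using the `rusqlite` crate directly:

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE hdr (timestamp INTEGER NOT NULL)", [])?;

    let ts: u64 = 1_718_000_000;
    // write side: refuse values that cannot be represented as i64
    let ts_sql = i64::try_from(ts).expect("timestamp out of SQL range");
    conn.execute("INSERT INTO hdr (timestamp) VALUES (?1)", params![ts_sql])?;

    // read side: pull an i64 and convert back, treating failure as a parse error
    let ts_i64: i64 = conn.query_row("SELECT timestamp FROM hdr", [], |row| row.get(0))?;
    let roundtrip: u64 = ts_i64.try_into().expect("negative timestamp in DB");
    assert_eq!(roundtrip, ts);
    Ok(())
}
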
         chainstate_tx.execute(
             "INSERT INTO nakamoto_block_headers
-                    (block_height, index_root, consensus_hash,
-                     burn_header_hash, burn_header_height,
-                     burn_header_timestamp, block_size,
-
-                     header_type,
-                     version, chain_length, burn_spent,
-                     miner_signature, signer_signature, tx_merkle_root, state_index_root,
-
-                     block_hash,
-                     index_block_hash,
-                     cost,
-                     total_tenure_cost,
-                     tenure_tx_fees,
-                     parent_block_id,
-                     tenure_changed,
-                     vrf_proof,
-                     signer_bitvec
-                    )
-                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)",
+                    (block_height,
+                     index_root,
+                     consensus_hash,
+                     burn_header_hash,
+                     burn_header_height,
+                     burn_header_timestamp,
+                     block_size,
+
+                     header_type,
+                     version,
+                     chain_length,
+                     burn_spent,
+                     miner_signature,
+                     signer_signature,
+                     tx_merkle_root,
+                     state_index_root,
+                     timestamp,
+
+                     block_hash,
+                     index_block_hash,
+                     cost,
+                     total_tenure_cost,
+                     tenure_tx_fees,
+                     parent_block_id,
+                     tenure_changed,
+                     vrf_proof,
+                     signer_bitvec,
+                     height_in_tenure,
+                     burn_view)
+                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26, ?27)",
             args
         )?;
@@ -2289,7 +2965,7 @@
         headers_tx: &mut StacksDBTx,
         parent_tip: &StacksBlockHeaderTypes,
         parent_consensus_hash: &ConsensusHash,
-        new_tip: &NakamotoBlockHeader,
+        new_block: &NakamotoBlock,
         new_vrf_proof: Option<&VRFProof>,
         new_burn_header_hash: &BurnchainHeaderHash,
         new_burnchain_height: u32,
@@ -2305,8 +2981,11 @@
         burn_delegate_stx_ops: Vec<DelegateStxOp>,
         burn_vote_for_aggregate_key_ops: Vec<VoteForAggregateKeyOp>,
         new_tenure: bool,
+        coinbase_height: u64,
         block_fees: u128,
+        burn_view: &ConsensusHash,
     ) -> Result<StacksHeaderInfo, ChainstateError> {
+        let new_tip = &new_block.header;
         if new_tip.parent_block_id
             != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH)
         {
@@ -2337,10 +3016,90 @@
         let new_block_hash = new_tip.block_hash();
         let index_block_hash = new_tip.block_id();
 
+        let mut marf_keys = vec![];
+        let mut marf_values = vec![];
+
+        if new_tenure {
+            // make the coinbase height point to this tenure-start block
+            marf_keys.push(nakamoto_keys::ongoing_tenure_coinbase_height(
+                coinbase_height,
+            ));
+            marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id()));
+
+            // point this tenure to its start block
+            marf_keys.push(nakamoto_keys::tenure_start_block_id(
+                &new_tip.consensus_hash,
+            ));
+            marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id()));
+
+            // record coinbase height of this tenure
+            marf_keys.push(nakamoto_keys::coinbase_height(&new_tip.consensus_hash));
+            marf_values.push(nakamoto_keys::make_u64_value(coinbase_height));
+
+            // record that this previous tenure is done
+            let Some(tenure_change_tx) = new_block.get_tenure_change_tx_payload() else {
+                // should be unreachable
+                error!(
+                    "Block {} is a tenure-change block, but does not contain a tenure-change tx",
+                    new_tip.block_id()
+                );
+                return Err(ChainstateError::InvalidStacksBlock(
+                    "Tenure-change block does not have a tenure-change tx".into(),
+                ));
+            };
+
+            marf_keys.push(nakamoto_keys::finished_tenure_consensus_hash(
+                &tenure_change_tx.prev_tenure_consensus_hash,
+            ));
+            marf_values.push(nakamoto_keys::make_bool_value(true));
+
+            // record parent tenure linkage
+            marf_keys.push(nakamoto_keys::parent_tenure_consensus_hash(
+                &tenure_change_tx.tenure_consensus_hash,
+            ));
+            marf_values.push(nakamoto_keys::make_consensus_hash_value(
+                &tenure_change_tx.prev_tenure_consensus_hash,
+            ));
+
+            // record last block-found tenure
+            let block_found_tenure_id = NakamotoTenureEventId {
+                burn_view_consensus_hash:
tenure_change_tx.burn_view_consensus_hash.clone(), + block_id: new_tip.block_id(), + }; + + marf_keys.push(nakamoto_keys::block_found_tenure_id( + &tenure_change_tx.tenure_consensus_hash, + )); + marf_values.push(nakamoto_keys::make_tenure_id_value(&block_found_tenure_id)); + } + + if let Some(tenure_tx) = new_block.get_tenure_tx_payload() { + // either a block-found or a tenure-extend, but we have a new tenure ID in this fork + let tenure_id = NakamotoTenureEventId { + burn_view_consensus_hash: tenure_tx.burn_view_consensus_hash.clone(), + block_id: new_tip.block_id(), + }; + + marf_keys.push(nakamoto_keys::ongoing_tenure_id().to_string()); + marf_values.push(nakamoto_keys::make_tenure_id_value(&tenure_id)); + } + + // record the highest block in this tenure + marf_keys.push(nakamoto_keys::highest_block_in_tenure( + &new_block.header.consensus_hash, + )); + marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id())); + + debug!("Set Nakamoto headers MARF"; "keys" => ?marf_keys, "values" => ?marf_values); + // store each indexed field test_debug!("Headers index_put_begin {parent_hash}-{index_block_hash}"); - let root_hash = - headers_tx.put_indexed_all(&parent_hash, &index_block_hash, &vec![], &vec![])?; + let root_hash = headers_tx.put_indexed_all( + &parent_hash, + &index_block_hash, + &marf_keys, + &marf_values, + )?; test_debug!("Headers index_indexed_all finished {parent_hash}-{index_block_hash}"); let new_tip_info = StacksHeaderInfo { @@ -2353,6 +3112,7 @@ impl NakamotoChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: block_size, + burn_view: Some(burn_view.clone()), }; let tenure_fees = block_fees @@ -2363,12 +3123,30 @@ impl NakamotoChainState { warn!( "Failed to fetch parent block's total tx fees"; "parent_block_id" => %parent_hash, - "block_id" => %index_block_hash, + "stacks_block_id" => %index_block_hash, ); ChainstateError::NoSuchBlockError })? }; + let height_in_tenure = if new_tenure { + 1 + } else { + let parent_height_in_tenure = + Self::get_nakamoto_tenure_length(headers_tx.sqlite(), &parent_hash)?; + if parent_height_in_tenure == 0 { + // means that there's no parent -- every tenure stored in the DB has length of at least 1 + warn!("Failed to fetch parent block's tenure height"; + "parent_block_id" => %parent_hash, + "block_id" => %index_block_hash, + ); + return Err(ChainstateError::NoSuchBlockError); + } + parent_height_in_tenure.checked_add(1).ok_or_else(|| { + ChainstateError::InvalidStacksBlock("Tenure height exceeds maximum".into()) + })? 
+ }; + Self::insert_stacks_block_header( headers_tx.deref_mut(), &new_tip_info, @@ -2377,6 +3155,7 @@ impl NakamotoChainState { anchor_block_cost, total_tenure_cost, new_tenure, + height_in_tenure, tenure_fees, )?; if let Some(block_reward) = block_reward { @@ -2422,7 +3201,7 @@ impl NakamotoChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args = params![index_block_hash]; headers_tx.deref_mut().execute(sql, args)?; } @@ -2439,7 +3218,7 @@ impl NakamotoChainState { reward_set: &RewardSet, ) -> Result<(), ChainstateError> { let sql = "INSERT INTO nakamoto_reward_sets (index_block_hash, reward_set) VALUES (?, ?)"; - let args = rusqlite::params![block_id, &reward_set.metadata_serialize(),]; + let args = params![block_id, reward_set.metadata_serialize(),]; tx.execute(sql, args)?; Ok(()) } @@ -2484,6 +3263,9 @@ impl NakamotoChainState { /// * coinbase_height: the number of tenures that this block confirms (including epoch2 blocks) /// (this is equivalent to the number of coinbases) /// * tenure_extend: whether or not to reset the tenure's ongoing execution cost + /// * block_bitvec: the bitvec that will control PoX reward handling for this block + /// * tenure_block_commit: the block commit that elected this miner + /// * active_reward_set: the reward and signer set active during `tenure_block_commit` /// /// Returns clarity_tx, list of receipts, microblock execution cost, /// microblock fees, microblock burns, list of microblock tx receipts, @@ -2504,7 +3286,12 @@ impl NakamotoChainState { new_tenure: bool, coinbase_height: u64, tenure_extend: bool, + block_bitvec: &BitVec<4000>, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, ) -> Result, ChainstateError> { + Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; + let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_sortition_id = sortition_dbconn .get_sortition_id_from_consensus_hash(&parent_consensus_hash) @@ -2523,15 +3310,34 @@ impl NakamotoChainState { None }; - // TODO: only need to do this if this is a tenure-start block let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = - StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( - chainstate_tx, - &parent_index_hash, - sortition_dbconn.sqlite_conn(), - &burn_header_hash, - burn_header_height.into(), - )?; + if new_tenure || tenure_extend { + StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( + chainstate_tx, + &parent_index_hash, + sortition_dbconn.sqlite_conn(), + &burn_header_hash, + burn_header_height.into(), + )? + } else { + (vec![], vec![], vec![], vec![]) + }; + + // Nakamoto must load block cost from parent if this block isn't a tenure change. + // If this is a tenure-extend, then the execution cost is reset. + let initial_cost = if new_tenure || tenure_extend { + ExecutionCost::zero() + } else { + let parent_cost_total = + Self::get_total_tenure_cost_at(chainstate_tx.as_tx(), &parent_index_hash)? + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to load total tenure cost from parent. 
parent_stacks_block_id = {}", + &parent_index_hash + )) + })?; + parent_cost_total + }; let mut clarity_tx = StacksChainState::chainstate_block_begin( chainstate_tx, @@ -2550,7 +3356,6 @@ impl NakamotoChainState { Self::calculate_matured_miner_rewards( &mut clarity_tx, sortition_dbconn.sqlite_conn(), - // coinbase_height + 1, coinbase_height, matured_rewards_schedule, ) @@ -2558,22 +3363,6 @@ impl NakamotoChainState { .transpose()? .flatten(); - // Nakamoto must load block cost from parent if this block isn't a tenure change. - // If this is a tenure-extend, then the execution cost is reset. - let initial_cost = if new_tenure || tenure_extend { - ExecutionCost::zero() - } else { - let parent_cost_total = - Self::get_total_tenure_cost_at(&chainstate_tx.deref().deref(), &parent_index_hash)? - .ok_or_else(|| { - ChainstateError::InvalidStacksBlock(format!( - "Failed to load total tenure cost from parent. parent_stacks_block_id = {}", - &parent_index_hash - )) - })?; - parent_cost_total - }; - clarity_tx.reset_cost(initial_cost); // is this stacks block the first of a new epoch? @@ -2586,6 +3375,27 @@ impl NakamotoChainState { "parent_header_hash" => %parent_header_hash, ); + if new_tenure { + clarity_tx + .connection() + .as_free_transaction(|clarity_tx_conn| { + clarity_tx_conn.with_clarity_db(|db| { + db.set_tenure_height( + coinbase_height + .try_into() + .expect("Tenure height overflowed 32-bit range"), + )?; + Ok(()) + }) + }) + .map_err(|e| { + error!("Failed to set tenure height during block setup"; + "error" => ?e, + ); + e + })?; + } + let evaluated_epoch = clarity_tx.get_epoch(); let auto_unlock_events = if evaluated_epoch >= StacksEpochId::Epoch21 { @@ -2655,6 +3465,12 @@ impl NakamotoChainState { &mut clarity_tx, vote_for_agg_key_ops.clone(), )); + + if signer_set_calc.is_some() { + debug!("Setup block: computed reward set for the next reward cycle"; + "anchor_block_height" => coinbase_height, + "burn_header_height" => burn_header_height); + } } else { signer_set_calc = None; } @@ -2710,12 +3526,89 @@ impl NakamotoChainState { Ok(lockup_events) } + fn check_pox_bitvector( + block_bitvec: &BitVec<4000>, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, + ) -> Result<(), ChainstateError> { + if !tenure_block_commit.treatment.is_empty() { + // our block commit issued a punishment, check the reward set and bitvector + // to ensure that this was valid. + for treated_addr in tenure_block_commit.treatment.iter() { + if treated_addr.is_burn() { + // Don't need to assert anything about burn addresses. + // If they were in the reward set, "punishing" them is meaningless. + continue; + } + // otherwise, we need to find the indices in the rewarded_addresses + // corresponding to this address. + let address_indices = active_reward_set + .rewarded_addresses + .iter() + .enumerate() + .filter_map(|(ix, addr)| { + if addr == treated_addr.deref() { + Some(ix) + } else { + None + } + }); + // if any of them are 0, punishment is okay. + // if all of them are 1, punishment is not okay. 
+ // if all of them are 0, *must* have punished + let bitvec_values: Result, ChainstateError> = address_indices + .map( + |ix| { + let ix = u16::try_from(ix) + .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; + let bitvec_value = block_bitvec.get(ix) + .unwrap_or_else(|| { + info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + true + }); + Ok(bitvec_value) + } + ) + .collect(); + let bitvec_values = bitvec_values?; + let all_1 = bitvec_values.iter().all(|x| *x); + let all_0 = bitvec_values.iter().all(|x| !x); + if all_1 { + if treated_addr.is_punish() { + warn!( + "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } else if all_0 { + if treated_addr.is_reward() { + warn!( + "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } + } + } + + Ok(()) + } + /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &mut SortitionHandleConn, + burnchain_view: &ConsensusHash, pox_constants: &PoxConstants, parent_chain_tip: &StacksHeaderInfo, chain_tip_burn_header_hash: &BurnchainHeaderHash, @@ -2725,6 +3618,7 @@ impl NakamotoChainState { block_size: u64, burnchain_commit_burn: u64, burnchain_sortition_burn: u64, + active_reward_set: &RewardSet, ) -> Result< ( StacksEpochReceipt, @@ -2774,53 +3668,37 @@ impl NakamotoChainState { let burn_header_height = tenure_block_snapshot.block_height; let block_hash = block.header.block_hash(); - let new_tenure = match block.is_wellformed_tenure_start_block() { - Ok(true) => true, - Ok(false) => { - // this block is mined in the ongoing tenure. - if !Self::check_tenure_continuity( - chainstate_tx, - burn_dbconn.sqlite(), - &parent_ch, - &block.header, - )? { - // this block is not part of the ongoing tenure; it's invalid - return Err(ChainstateError::ExpectedTenureChange); - } - false - } - Err(_) => { - return Err(ChainstateError::InvalidStacksBlock( - "Invalid tenure changes in nakamoto block".into(), - )); - } - }; + let new_tenure = block.is_wellformed_tenure_start_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Invalid tenure changes in nakamoto block".into()) + })?; - let tenure_extend = match block.is_wellformed_tenure_extend_block() { - Ok(true) => { - if new_tenure { - return Err(ChainstateError::InvalidStacksBlock( - "Both started and extended tenure".into(), - )); - } - true - } - Ok(false) => false, - Err(_) => { - return Err(ChainstateError::InvalidStacksBlock( - "Invalid tenure extend in nakamoto block".into(), - )); - } - }; + // this block is mined in the ongoing tenure. + if !new_tenure + && !Self::check_tenure_continuity(chainstate_tx.as_tx(), &parent_ch, &block.header)? 
+ { + // this block is not part of the ongoing tenure; it's invalid + return Err(ChainstateError::ExpectedTenureChange); + } + let tenure_extend = block.is_wellformed_tenure_extend_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Invalid tenure changes in nakamoto block".into()) + })?; + + if tenure_extend && new_tenure { + return Err(ChainstateError::InvalidStacksBlock( + "Both started and extended tenure".into(), + )); + } let parent_coinbase_height = if block.is_first_mined() { 0 } else { - Self::get_coinbase_height(chainstate_tx.deref(), &parent_block_id)?.ok_or_else( + Self::get_coinbase_height(chainstate_tx.as_tx(), &parent_block_id)?.ok_or_else( || { warn!( "Parent of Nakamoto block is not in block headers DB yet"; - "block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), "parent_block_hash" => %parent_block_hash, "parent_block_id" => %parent_block_id ); @@ -2832,17 +3710,21 @@ impl NakamotoChainState { let expected_burn_opt = Self::get_expected_burns(burn_dbconn, chainstate_tx, block) .map_err(|e| { warn!("Unacceptable Nakamoto block: could not load expected burns (unable to find its paired sortition)"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - "error" => e.to_string(), + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id, + "error" => e.to_string(), ); ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: could not find sortition burns".into()) })?; let Some(expected_burn) = expected_burn_opt else { warn!("Unacceptable Nakamoto block: unable to find parent block's burns"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id, ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: could not find sortition burns".into(), @@ -2860,41 +3742,49 @@ impl NakamotoChainState { )); } + // this block's bitvec header must match the miner's block commit punishments + let tenure_block_commit = SortitionDB::get_block_commit( + burn_dbconn.conn(), + &tenure_block_snapshot.winning_block_txid, + &tenure_block_snapshot.sortition_id, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: has no block-commit in its sortition"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "sortition_id" => %tenure_block_snapshot.sortition_id, + "block_commit_txid" => %tenure_block_snapshot.winning_block_txid + ); + ChainstateError::NoSuchBlockError + })?; + // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start // block. // (note that we can't check this earlier, since we need the parent tenure to have been // processed) if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let tenure_block_commit = burn_dbconn - .get_block_commit( - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? 
- .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); - ChainstateError::NoSuchBlockError - })?; - - let parent_tenure_start_header = - Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "block_id" => %block.header.block_id()); - - ChainstateError::NoSuchBlockError - })?; + let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header( + chainstate_tx.as_tx(), + &parent_block_id, + &parent_ch, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: no start-tenure block for parent"; + "parent_consensus_hash" => %parent_ch, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id()); + ChainstateError::NoSuchBlockError + })?; if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() { warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; - "block_id" => %block.header.block_id(), - "parent_consensus_hash" => %parent_ch, - "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), - "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id()); + "parent_consensus_hash" => %parent_ch, + "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), + "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id() + ); return Err(ChainstateError::NoSuchBlockError); } @@ -2904,7 +3794,7 @@ impl NakamotoChainState { // only need to do this once per tenure // get the resulting vrf proof bytes let vrf_proof_opt = if new_tenure { - Self::check_block_commit_vrf_seed(chainstate_tx.deref(), burn_dbconn, block)?; + Self::check_block_commit_vrf_seed(chainstate_tx.as_tx(), burn_dbconn, block)?; Some( block .get_vrf_proof() @@ -2975,6 +3865,9 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + &block.header.pox_treatment, + &tenure_block_commit, + active_reward_set, )?; let starting_cost = clarity_tx.cost_so_far(); @@ -2982,6 +3875,7 @@ impl NakamotoChainState { debug!( "Append nakamoto block"; "block" => format!("{}/{block_hash}", block.header.consensus_hash), + "block_id" => %block.header.block_id(), "parent_block" => %block.header.parent_block_id, "stacks_height" => next_block_height, "total_burns" => block.header.burn_spent, @@ -3103,7 +3997,7 @@ impl NakamotoChainState { &mut chainstate_tx.tx, &parent_chain_tip.anchored_header, &parent_chain_tip.consensus_hash, - &block.header, + &block, vrf_proof_opt, chain_tip_burn_header_hash, chain_tip_burn_header_height, @@ -3119,7 +4013,9 @@ impl NakamotoChainState { burn_delegate_stx_ops, burn_vote_for_aggregate_key_ops, new_tenure, + coinbase_height, block_fees, + burnchain_view, ) .expect("FATAL: failed to advance chain tip"); @@ -3203,7 +4099,7 @@ impl NakamotoChainState { pub fn make_miners_stackerdb_config( sortdb: &SortitionDB, tip: &BlockSnapshot, - ) -> Result { + ) -> Result<(StackerDBConfig, MinersDBInformation), ChainstateError> { let ih = sortdb.index_handle(&tip.sortition_id); let last_winner_snapshot = ih.get_last_snapshot_with_sortition(tip.block_height)?; let parent_winner_snapshot = 
ih.get_last_snapshot_with_sortition( @@ -3215,13 +4111,13 @@ impl NakamotoChainState { // go get their corresponding leader keys, but preserve the miner's relative position in // the stackerdb signer list -- if a miner was in slot 0, then it should stay in slot 0 // after a sortition (and vice versa for 1) - let sns = if last_winner_snapshot.num_sortitions % 2 == 0 { - [last_winner_snapshot, parent_winner_snapshot] + let (latest_winner_idx, sns) = if last_winner_snapshot.num_sortitions % 2 == 0 { + (0, [last_winner_snapshot, parent_winner_snapshot]) } else { - [parent_winner_snapshot, last_winner_snapshot] + (1, [parent_winner_snapshot, last_winner_snapshot]) }; - for sn in sns { + for sn in sns.iter() { // find the commit let Some(block_commit) = ih.get_block_commit_by_txid(&sn.sortition_id, &sn.winning_block_txid)? @@ -3252,6 +4148,12 @@ impl NakamotoChainState { ); } + let miners_db_info = MinersDBInformation { + signer_0_sortition: sns[0].consensus_hash, + signer_1_sortition: sns[1].consensus_hash, + latest_winner: latest_winner_idx, + }; + let signers = miner_key_hash160s .into_iter() .map(|hash160| @@ -3265,14 +4167,17 @@ impl NakamotoChainState { )) .collect(); - Ok(StackerDBConfig { - chunk_size: MAX_PAYLOAD_LEN.into(), - signers, - write_freq: 5, - max_writes: u32::MAX, // no limit on number of writes - max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers - hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? - }) + Ok(( + StackerDBConfig { + chunk_size: MAX_PAYLOAD_LEN.into(), + signers, + write_freq: 0, + max_writes: u32::MAX, // no limit on number of writes + max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers + hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? + }, + miners_db_info, + )) } /// Get the slot range for the given miner's public key. 
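The parity trick above deserves a concrete illustration. Below is a minimal, self-contained sketch of just that rule; MinersInfo is a hypothetical stand-in for MinersDBInformation (illustrative field types), and only the num_sortitions % 2 assignment and the sortition-to-slot lookup mirror make_miners_stackerdb_config() and get_signer_index():

// Illustrative sketch, not part of the patch. An even sortition count pins the
// latest winner to slot 0 and the parent winner to slot 1; an odd count swaps
// them. A miner that wins consecutive sortitions therefore keeps its slot.
struct MinersInfo {
    signer_0_sortition: String, // consensus hash of the sortition that elected slot 0's miner
    signer_1_sortition: String, // consensus hash of the sortition that elected slot 1's miner
    latest_winner: u16,         // slot index held by the most recent winner
}

impl MinersInfo {
    fn new(num_sortitions: u64, last: String, parent: String) -> Self {
        if num_sortitions % 2 == 0 {
            Self { signer_0_sortition: last, signer_1_sortition: parent, latest_winner: 0 }
        } else {
            Self { signer_0_sortition: parent, signer_1_sortition: last, latest_winner: 1 }
        }
    }

    // analogue of MinersDBInformation::get_signer_index(): map an election
    // sortition to the miner's slot, or None if it is not one of the two
    fn get_signer_index(&self, election_sortition: &str) -> Option<u16> {
        if self.signer_0_sortition == election_sortition {
            Some(0)
        } else if self.signer_1_sortition == election_sortition {
            Some(1)
        } else {
            None
        }
    }
}

fn main() {
    let info = MinersInfo::new(11, "ch-last".into(), "ch-parent".into());
    assert_eq!(info.latest_winner, 1);
    assert_eq!(info.get_signer_index("ch-last"), Some(1));
    assert_eq!(info.get_signer_index("ch-parent"), Some(0));
    assert_eq!(info.get_signer_index("ch-unknown"), None);
}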
@@ -3283,33 +4188,33 @@ impl NakamotoChainState { pub fn get_miner_slot( sortdb: &SortitionDB, tip: &BlockSnapshot, - miner_pubkey: &StacksPublicKey, + election_sortition: &ConsensusHash, ) -> Result>, ChainstateError> { - let miner_hash160 = Hash160::from_node_public_key(&miner_pubkey); - let stackerdb_config = Self::make_miners_stackerdb_config(sortdb, &tip)?; + let (stackerdb_config, miners_info) = Self::make_miners_stackerdb_config(sortdb, &tip)?; // find out which slot we're in - let mut slot_index = 0; - let mut slot_id_result = None; - for (addr, slot_count) in stackerdb_config.signers.iter() { - if addr.bytes == miner_hash160 { - slot_id_result = Some(Range { - start: slot_index, - end: slot_index + slot_count, - }); - break; - } - slot_index += slot_count; - } - - let Some(slot_id_range) = slot_id_result else { - // miner key does not match any slot + let Some(signer_ix) = miners_info + .get_signer_index(election_sortition) + .map(usize::from) + else { warn!("Miner is not in the miners StackerDB config"; - "miner" => %miner_hash160, - "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); - + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); return Ok(None); }; + let mut signer_ranges = stackerdb_config.signer_ranges(); + if signer_ix >= signer_ranges.len() { + // should be unreachable, but always good to be careful + warn!("Miner is not in the miners StackerDB config"; + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); + + return Ok(None); + } + let slot_id_range = signer_ranges.swap_remove(signer_ix); + Ok(Some(slot_id_range)) } @@ -3366,7 +4271,11 @@ impl StacksMessageCodec for NakamotoBlock { // all transactions are unique if !StacksBlock::validate_transactions_unique(&txs) { - warn!("Invalid block: Found duplicate transaction"; "block_hash" => header.block_hash()); + warn!("Invalid block: Found duplicate transaction"; + "consensus_hash" => %header.consensus_hash, + "stacks_block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id() + ); return Err(CodecError::DeserializeError( "Invalid block: found duplicate transaction".to_string(), )); @@ -3379,7 +4288,11 @@ impl StacksMessageCodec for NakamotoBlock { let tx_merkle_root: Sha512Trunc256Sum = merkle_tree.root(); if tx_merkle_root != header.tx_merkle_root { - warn!("Invalid block: Tx Merkle root mismatch"; "block_hash" => header.block_hash()); + warn!("Invalid block: Tx Merkle root mismatch"; + "consensus_hash" => %header.consensus_hash, + "stacks_block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id() + ); return Err(CodecError::DeserializeError( "Invalid block: tx Merkle root mismatch".to_string(), )); diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 50492869088..d7eaad51b56 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -25,8 +25,8 @@ use clarity::vm::types::{ }; use clarity::vm::{ClarityVersion, ContractName, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OptionalExtension}; use sha2::{Digest as Sha2Digest, 
Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -40,6 +40,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; @@ -58,7 +59,6 @@ use crate::chainstate::burn::operations::{ }; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; -use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{ PoxVersions, RawRewardSetEntry, RewardSet, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, @@ -217,6 +217,8 @@ impl NakamotoSigners { Ok(slots) } + /// Compute the reward set for the next reward cycle, store it, and write it to the .signers + /// contract. `reward_cycle` is the _current_ reward cycle. pub fn handle_signer_stackerdb_update( clarity: &mut ClarityTransactionConnection, pox_constants: &PoxConstants, @@ -238,6 +240,7 @@ impl NakamotoSigners { let reward_set = StacksChainState::make_reward_set(threshold, reward_slots, StacksEpochId::Epoch30); + test_debug!("Reward set for cycle {}: {:?}", &reward_cycle, &reward_set); let stackerdb_list = if participation == 0 { vec![] } else { @@ -351,6 +354,11 @@ impl NakamotoSigners { Ok(SignerCalculation { events, reward_set }) } + /// If this block is mined in the prepare phase, based on its tenure's `burn_tip_height`. If + /// so, and if we haven't done so yet, then compute the PoX reward set, store it, and update + /// the .signers contract. The stored PoX reward set is the reward set for the next reward + /// cycle, and will be used by the Nakamoto chains coordinator to validate its block-commits + /// and block signatures. pub fn check_and_handle_prepare_phase_start( clarity_tx: &mut ClarityTx, first_block_height: u64, @@ -514,7 +522,7 @@ impl NakamotoSigners { return false; } if origin_nonce < *account_nonce { - debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({account_nonce} < {origin_nonce})."); + debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({origin_nonce} < {account_nonce})."); return false; } Self::parse_vote_for_aggregate_public_key(transaction).is_some() diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0702a890701..88e1744bb6c 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -14,15 +14,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
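Before the schema changes below: the new NakamotoBlockObtainMethod enum is persisted in the obtain_method TEXT column through a Display impl that forwards to Debug, so each variant is stored as its bare name. A minimal round-trip sketch with standalone types; the parse_obtain_method decoder is hypothetical (the patch only defines the encoder) and shows why a TEXT encoding stays forwards-compatible:

use std::fmt;

#[derive(Debug, PartialEq, Clone, Copy)]
enum ObtainMethod {
    Downloaded,
    Pushed,
    Mined,
    Uploaded,
}

impl fmt::Display for ObtainMethod {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self) // Debug of a unit variant is just its name
    }
}

// Hypothetical decoder: an unrecognized string decodes to None instead of
// failing hard, so rows written by a newer node can still be read.
fn parse_obtain_method(s: &str) -> Option<ObtainMethod> {
    match s {
        "Downloaded" => Some(ObtainMethod::Downloaded),
        "Pushed" => Some(ObtainMethod::Pushed),
        "Mined" => Some(ObtainMethod::Mined),
        "Uploaded" => Some(ObtainMethod::Uploaded),
        _ => None,
    }
}

fn main() {
    assert_eq!(ObtainMethod::Pushed.to_string(), "Pushed");
    assert_eq!(parse_obtain_method("Pushed"), Some(ObtainMethod::Pushed));
    assert_eq!(parse_obtain_method("SomeFutureMethod"), None);
}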
-use std::fs; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; +use std::{fmt, fs}; use lazy_static::lazy_static; use rusqlite::blob::Blob; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -33,10 +34,25 @@ use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlockHeader}; use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::db::{ - query_int, query_row, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, - DBConn, Error as DBError, FromRow, + query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, + tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; +/// The means by which a block is obtained. +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum NakamotoBlockObtainMethod { + Downloaded, + Pushed, + Mined, + Uploaded, +} + +impl fmt::Display for NakamotoBlockObtainMethod { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ r#" -- Table for staging nakamoto blocks @@ -70,7 +86,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ -- when this block was processed processed_time INT NOT NULL, - -- block data + -- block data, including its header data BLOB NOT NULL, PRIMARY KEY(block_hash,consensus_hash) @@ -80,6 +96,59 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#, ]; +pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ + r#" + DROP TABLE nakamoto_staging_blocks; + "#, + r#" + -- Table for staging nakamoto blocks + CREATE TABLE nakamoto_staging_blocks ( + -- SHA512/256 hash of this block (derived value from `data`) + block_hash TEXT NOT NULL, + -- The consensus hash of the burnchain block that selected this block's miner's block-commit. + -- This identifies the tenure to which this block belongs. + consensus_hash TEXT NOT NULL, + -- the parent index_block_hash + parent_block_id TEXT NOT NULL, + -- whether or not this is the first block in its tenure + is_tenure_start BOOL NOT NULL, + + -- has the burnchain block with this block's `consensus_hash` been processed? + burn_attachable INT NOT NULL, + -- has this block been processed? + processed INT NOT NULL, + -- set to 1 if this block can never be attached + orphaned INT NOT NULL, + + -- block height + height INT NOT NULL, + + -- used internally -- this is the StacksBlockId of this block's consensus hash and block hash + -- (derived value from `data`) + index_block_hash TEXT UNIQUE NOT NULL, + -- when this block was processed + processed_time INT NOT NULL, + -- how the block was obtained -- was it pushed? downloaded? uploaded? etc. 
+ -- (encoded as text for forwards-compatibility) + obtain_method TEXT NOT NULL, + -- signing weight of this block + signing_weight INTEGER NOT NULL, + + -- block data, including its header + data BLOB NOT NULL, + + PRIMARY KEY(block_hash,consensus_hash) + );"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash_and_consensus_hash ON nakamoto_staging_blocks(index_block_hash,consensus_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_burn_attachable ON nakamoto_staging_blocks(consensus_hash,burn_attachable);"#, + r#"CREATE TABLE db_version ( + version INTEGER NOT NULL + );"#, + r#"INSERT INTO db_version (version) VALUES (2)"#, +]; + pub struct NakamotoStagingBlocksConn(rusqlite::Connection); impl Deref for NakamotoStagingBlocksConn { @@ -160,72 +229,44 @@ impl NakamotoStagingBlocksConn { } impl<'a> NakamotoStagingBlocksConnRef<'a> { - /// Determine if there exists any unprocessed Nakamoto blocks + /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not - pub fn has_any_unprocessed_nakamoto_block(&self) -> Result { - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 0 LIMIT 1"; - let res: Option = query_row(self, qry, NO_PARAMS)?; - Ok(res.is_some()) - } - - /// Determine whether or not we have processed at least one Nakamoto block in this sortition history. - /// NOTE: the relevant field queried from `nakamoto_staging_blocks` is updated by a separate - /// tx from block-processing, so it's imperative that the thread that calls this function is - /// the *same* thread as the one that processes blocks. - /// Returns Ok(true) if at least one block in `nakamoto_staging_blocks` has `processed = 1` - /// Returns Ok(false) if not /// Returns Err(..) on DB error - fn has_processed_nakamoto_block( - &self, - sortition_handle: &SH, - ) -> Result { - let Some((ch, bhh, _height)) = sortition_handle.get_nakamoto_tip()? else { - return Ok(false); - }; - - // this block must be a processed Nakamoto block - let ibh = StacksBlockId::new(&ch, &bhh); - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 1 AND index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[&ibh]; - let res: Option = query_row(self, qry, args)?; - Ok(res.is_some()) - } - - /// Determine if we have a particular block - /// Returns Ok(true) if so - /// Returns Ok(false) if not - /// Returns Err(..) on DB error - pub fn has_nakamoto_block( + pub fn has_nakamoto_block_with_index_hash( &self, index_block_hash: &StacksBlockId, ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } - /// Get a staged Nakamoto tenure-start block - pub fn get_nakamoto_tenure_start_block( + /// Get the block ID, processed-status, orphan-status, and signing weight of the non-orphaned + /// block with the given consensus hash and sighash with the most amount of signatures. + /// There will be at most one such block. + /// + /// NOTE: for Nakamoto blocks, the sighash is the same as the block hash. 
+ pub(crate) fn get_block_processed_and_signed_weight( &self, consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; - let args: &[&dyn ToSql] = &[consensus_hash]; - let data: Option> = query_row(self, qry, args)?; - let Some(block_bytes) = data else { - return Ok(None); - }; - let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?; - if &block.header.consensus_hash != consensus_hash { - error!( - "Staging DB corruption: expected {}, got {}", - consensus_hash, block.header.consensus_hash - ); - return Err(DBError::Corruption.into()); - } - Ok(Some(block)) + block_hash: &BlockHeaderHash, + ) -> Result, ChainstateError> { + let sql = "SELECT index_block_hash,processed,orphaned,signing_weight FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2 ORDER BY signing_weight DESC, index_block_hash LIMIT 1"; + let args = params![consensus_hash, block_hash]; + + let mut stmt = self.deref().prepare(sql)?; + Ok(stmt + .query_row(args, |row| { + let block_id: StacksBlockId = row.get(0)?; + let processed: bool = row.get(1)?; + let orphaned: bool = row.get(2)?; + let signing_weight: u32 = row.get(3)?; + + Ok((block_id, processed, orphaned, signing_weight)) + }) + .optional()?) } /// Get the rowid of a Nakamoto block @@ -234,7 +275,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT rowid FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let res: Option = query_row(self, sql, args)?; Ok(res) } @@ -249,7 +290,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let res: Option> = query_row(self, qry, args)?; let Some(block_bytes) = res else { return Ok(None); @@ -278,7 +319,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT length(data) FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let res = query_row(self, qry, args)? .map(|size: i64| u64::try_from(size).expect("FATAL: block size exceeds i64::MAX")); Ok(res) @@ -289,10 +330,9 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { /// tx from block-processing, so it's imperative that the thread that calls this function is /// the *same* thread that goes to process blocks. /// Returns (the block, the size of the block) - pub(crate) fn next_ready_nakamoto_block( + pub(crate) fn next_ready_nakamoto_block( &self, header_conn: &Connection, - sortition_handle: &SH, ) -> Result, ChainstateError> { let query = "SELECT child.data FROM nakamoto_staging_blocks child JOIN nakamoto_staging_blocks parent ON child.parent_block_id = parent.index_block_hash @@ -318,18 +358,12 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { // This query can fail if the parent of `child` is not a Nakamoto block, which // is allowed -- a Nakamoto block can descend from an epoch2 block (but since // Nakamoto does not fork without a Bitcoin fork, it'll be the only such child - // within that Bitcoin forok). 
+ // within that Bitcoin fork unless either signers screw up, or there are + // multiple malleablized copies of this first-ever block available). // - // So, if at least one Nakamoto block is processed in this Bitcoin fork, - // then the next ready block's parent *must* be a Nakamoto block. So - // if the below is true, then there are no ready blocks. - if self.has_processed_nakamoto_block(sortition_handle)? { - return Ok(None); - } - - // no nakamoto blocks processed yet, so the parent *must* be an epoch2 block! - // go find it. Note that while this is expensive, it only has to be done - // _once_, and it will only touch at most one reward cycle's worth of blocks. + // Regardless, this query usually returns zero rows. It will return one or + // more rows in the above case for an epoch2 parent, or when there are + // discontiguous Nakamoto blocks available for processing. let sql = "SELECT index_block_hash,parent_block_id FROM nakamoto_staging_blocks WHERE processed = 0 AND orphaned = 0 AND burn_attachable = 1 ORDER BY height ASC"; let mut stmt = self.deref().prepare(sql)?; let mut qry = stmt.query(NO_PARAMS)?; @@ -338,7 +372,9 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { let index_block_hash : StacksBlockId = row.get(0)?; let parent_block_id : StacksBlockId = row.get(1)?; - let Some(_parent_epoch2_block) = NakamotoChainState::get_block_header_epoch2(header_conn, &parent_block_id)? else { + // this naturally will ignore nakamoto blocks whose parent nakamoto blocks + // are not yet known -- they won't be epoch2 blocks either! + if !NakamotoChainState::has_block_header_epoch2(header_conn, &parent_block_id)? { continue; }; @@ -357,6 +393,27 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } }) } + + /// Given a block ID, determine if it has children that have been processed and accepted + pub fn has_children(&self, index_block_hash: &StacksBlockId) -> Result { + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE parent_block_id = ?1 AND processed = 1 AND orphaned = 0 LIMIT 1"; + let args = rusqlite::params![index_block_hash]; + let children_flags: Option = query_row(self, qry, args)?; + Ok(children_flags.is_some()) + } + + /// Given a consensus hash, determine if the burn block has been processed. + /// Because this is stored in a denormalized way, we'll want to do this whenever we store a + /// block (so we can set `burn_attachable` accordingly) + pub fn is_burn_block_processed( + &self, + consensus_hash: &ConsensusHash, + ) -> Result { + let sql = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND burn_attachable = 1"; + let args = rusqlite::params![consensus_hash]; + let res: Option = query_row(self, sql, args)?; + Ok(res.is_some()) + } } impl<'a> NakamotoStagingBlocksTx<'a> { @@ -369,7 +426,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { WHERE index_block_hash = ?1"; self.execute( &clear_staged_block, - params![&block, &u64_to_sql(get_epoch_time_secs())?], + params![block, u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) @@ -389,7 +446,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { WHERE index_block_hash = ?1"; self.execute( &clear_staged_block, - params![&block, &u64_to_sql(get_epoch_time_secs())?], + params![block, u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) @@ -407,6 +464,112 @@ impl<'a> NakamotoStagingBlocksTx<'a> { Ok(()) } + + /// Store a block into the staging DB. 
+ pub(crate) fn store_block( + &self, + block: &NakamotoBlock, + burn_attachable: bool, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result<(), ChainstateError> { + let Ok(tenure_start) = block.is_wellformed_tenure_start_block() else { + return Err(ChainstateError::InvalidStacksBlock( + "Tried to store a tenure-start block that is not well-formed".into(), + )); + }; + + let burn_attachable = burn_attachable || { + // if it's burn_attachable before, it's burn_attachable always + self.conn() + .is_burn_block_processed(&block.header.consensus_hash)? + }; + + self.execute( + "INSERT INTO nakamoto_staging_blocks ( + block_hash, + consensus_hash, + parent_block_id, + is_tenure_start, + burn_attachable, + orphaned, + processed, + + height, + index_block_hash, + processed_time, + obtain_method, + signing_weight, + + data + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", + params![ + &block.header.block_hash(), + &block.header.consensus_hash, + &block.header.parent_block_id, + &tenure_start, + if burn_attachable { 1 } else { 0 }, + 0, + 0, + u64_to_sql(block.header.chain_length)?, + &block.block_id(), + 0, + obtain_method.to_string(), + signing_weight, + block.serialize_to_vec(), + ], + )?; + if burn_attachable { + self.set_burn_block_processed(&block.header.consensus_hash)?; + } + Ok(()) + } + + /// Do we have a block with the given signer sighash? + /// NOTE: the block hash and sighash are the same for Nakamoto blocks + pub(crate) fn has_nakamoto_block_with_block_hash( + &self, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> Result { + let qry = + "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; + let args = rusqlite::params![consensus_hash, block_hash]; + let present: Option = query_row(self, qry, args)?; + Ok(present.is_some()) + } + + /// Store a block into the staging DB if its sighash has never been seen before. + /// NOTE: the block hash and sighash are the same for Nakamoto blocks, so this is equivalent to + /// storing a new block. + /// Return true if stored; false if not. + pub(crate) fn try_store_block_with_new_signer_sighash( + &self, + block: &NakamotoBlock, + burn_attachable: bool, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result { + let block_hash = block.header.block_hash(); + if self.has_nakamoto_block_with_block_hash(&block.header.consensus_hash, &block_hash)? { + return Ok(false); + } + self.store_block(block, burn_attachable, signing_weight, obtain_method)?; + Ok(true) + } + + /// Replace an already-stored block with a newer copy with more signing + /// power. Arguments will not be validated; the caller must do this. 
+ pub(crate) fn replace_block( + &self, + block: &NakamotoBlock, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result<(), ChainstateError> { + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", + params![&block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), &block.header.consensus_hash, &block.header.block_hash()])?; + Ok(()) + } } impl StacksChainState { @@ -464,6 +627,48 @@ impl StacksChainState { Self::static_get_nakamoto_staging_blocks_path(PathBuf::from(self.root_path.as_str())) } + /// Get the database version + pub fn get_nakamoto_staging_blocks_db_version( + conn: &Connection, + ) -> Result { + let qry = "SELECT version FROM db_version ORDER BY version DESC LIMIT 1"; + let args = NO_PARAMS; + let version: Option = match query_row(&conn, qry, args) { + Ok(x) => x, + Err(e) => { + debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); + return Ok(1); + } + }; + + match version { + Some(ver_i64) => { + let ver = u32::try_from(ver_i64) + .map_err(|_e| ChainstateError::DBError(DBError::Corruption))?; + Ok(ver) + } + None => { + debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); + Ok(1) + } + } + } + + /// Perform migrations + pub fn migrate_nakamoto_staging_blocks(conn: &Connection) -> Result<(), ChainstateError> { + let mut version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + if version < 2 { + debug!("Migrate Nakamoto staging blocks DB to schema 2"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 2, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 2"); + } + Ok(()) + } + /// Open and set up a DB for nakamoto staging blocks. /// If it doesn't exist, then instantiate it if `readwrite` is true. pub fn open_nakamoto_staging_blocks( @@ -490,7 +695,13 @@ impl StacksChainState { for cmd in NAKAMOTO_STAGING_DB_SCHEMA_1.iter() { conn.execute(cmd, NO_PARAMS)?; } + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + } else if readwrite { + Self::migrate_nakamoto_staging_blocks(&conn)?; } + Ok(NakamotoStagingBlocksConn(conn)) } } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 7389c033374..bff030be8f8 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -55,7 +55,7 @@ //! about when they created the `TenureChange. //! //! The Nakamoto system uses this module to track the set of all tenures. It does so within a -//! (derived-state) table called `nakamoto_tenures`. Whenever a `TenureChange` transaction is +//! (derived-state) table called `nakamoto_tenure_events`. Whenever a `TenureChange` transaction is //! processed, a new row will be added to this table. //! 
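Concretely, "a new row will be added" per TenureChange means a tenure that is started by a sortition and later extended leaves two rows behind, keyed by (burn_view_consensus_hash, block_id). A self-contained rusqlite sketch of that shape, with simplified columns and illustrative cause values (0 for BlockFound, 1 for Extended; the real table is defined in NAKAMOTO_TENURES_SCHEMA_3 below):

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE nakamoto_tenure_events (
             tenure_id_consensus_hash TEXT NOT NULL,
             burn_view_consensus_hash TEXT NOT NULL,
             cause INTEGER NOT NULL,
             block_id TEXT NOT NULL,
             PRIMARY KEY(burn_view_consensus_hash, block_id)
         );",
    )?;
    // BlockFound: a sortition elected a new miner, starting the tenure.
    conn.execute(
        "INSERT INTO nakamoto_tenure_events VALUES (?1, ?2, ?3, ?4)",
        params!["tenure-ch", "burn-view-1", 0, "block-a"],
    )?;
    // Extended: the same tenure continues under a later burn view, so a second
    // row is appended rather than the first row being updated in place.
    conn.execute(
        "INSERT INTO nakamoto_tenure_events VALUES (?1, ?2, ?3, ?4)",
        params!["tenure-ch", "burn-view-2", 1, "block-b"],
    )?;
    let events: i64 = conn.query_row(
        "SELECT COUNT(*) FROM nakamoto_tenure_events WHERE tenure_id_consensus_hash = ?1",
        params!["tenure-ch"],
        |row| row.get(0),
    )?;
    assert_eq!(events, 2);
    Ok(())
}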
use std::collections::HashSet; @@ -67,8 +67,8 @@ use clarity::vm::database::BurnStateDB; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::StacksAddressExtensions; use lazy_static::{__Deref, lazy_static}; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OptionalExtension}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::codec::{ read_next, write_next, Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN, @@ -80,6 +80,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; @@ -89,12 +90,14 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionHandle, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::{ MaturedMinerPaymentSchedules, MaturedMinerRewards, NakamotoBlock, NakamotoBlockHeader, - NakamotoChainState, + NakamotoChainState, StacksDBIndexed, }; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ @@ -118,7 +121,7 @@ use crate::util_lib::db::{ FromRow, }; -pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" +pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" CREATE TABLE nakamoto_tenures ( -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit -- was mined) @@ -155,8 +158,102 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); "#; +pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" + -- Drop the nakamoto_tenures table if it exists + DROP TABLE IF EXISTS nakamoto_tenures; + + CREATE TABLE nakamoto_tenures ( + -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit + -- was mined) + tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the previous tenure's start-tenure block + prev_tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the last-processed sortition + burn_view_consensus_hash TEXT NOT NULL, + -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). + -- this is equal to the `cause` field in a TenureChange + cause INTEGER NOT NULL, + -- block hash of start-tenure block + block_hash TEXT NOT NULL, + -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) + block_id TEXT NOT NULL, + -- this field is the total number of _sortition-induced_ tenures in the chain history (including this tenure), + -- as of the _end_ of this block. 
A tenure can contain multiple TenureChanges; if so, then this + -- is the height of the _sortition-induced_ TenureChange that created it. + coinbase_height INTEGER NOT NULL, + -- number of blocks this tenure. + -- * for tenure-changes induced by sortitions, this is the number of blocks in the previous tenure + -- * for tenure-changes induced by extension, this is the number of blocks in the current tenure so far. + num_blocks_confirmed INTEGER NOT NULL, + -- this is the ith tenure transaction in its respective Nakamoto chain history. + tenure_index INTEGER NOT NULL, + + PRIMARY KEY(burn_view_consensus_hash,tenure_index) + ); + CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); + CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); + CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); + CREATE INDEX nakamoto_tenures_by_burn_view_consensus_hash ON nakamoto_tenures(burn_view_consensus_hash); + CREATE INDEX nakamoto_tenures_by_tenure_index ON nakamoto_tenures(tenure_index); + CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); +"#; + +pub static NAKAMOTO_TENURES_SCHEMA_3: &'static str = r#" + -- Drop the nakamoto_tenures table if it exists + DROP TABLE IF EXISTS nakamoto_tenures; + + -- This table records each tenure-change, be it a BlockFound or Extended tenure. + -- These are not tenures themselves; these are instead inserted each time a TenureChange transaction occurs. + -- Each row is a state-change in the ongoing tenure. + CREATE TABLE nakamoto_tenure_events ( + -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit + -- was mined) + tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the previous tenure's start-tenure block + prev_tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the last-processed sortition + burn_view_consensus_hash TEXT NOT NULL, + -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). + -- this is equal to the `cause` field in a TenureChange + cause INTEGER NOT NULL, + -- block hash of start-tenure block + block_hash TEXT NOT NULL, + -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) + block_id TEXT NOT NULL, + -- this field is the total number of _sortition-induced_ tenures in the chain history (including this tenure), + -- as of the _end_ of this block. A tenure can contain multiple TenureChanges; if so, then this + -- is the height of the _sortition-induced_ TenureChange that created it. + coinbase_height INTEGER NOT NULL, + -- number of blocks this tenure. + -- * for tenure-changes induced by sortitions, this is the number of blocks in the previous tenure + -- * for tenure-changes induced by extension, this is the number of blocks in the current tenure so far. 
+    num_blocks_confirmed INTEGER NOT NULL,
+
+    -- key each tenure by its tenure-start block, and the burn view (since the tenure can span multiple sortitions, and thus
+    -- there can be multiple burn_view_consensus_hash values per block_id)
+    PRIMARY KEY(burn_view_consensus_hash,block_id)
+  ) STRICT;
+  CREATE INDEX nakamoto_tenure_events_by_block_id ON nakamoto_tenure_events(block_id);
+  CREATE INDEX nakamoto_tenure_events_by_tenure_id ON nakamoto_tenure_events(tenure_id_consensus_hash);
+  CREATE INDEX nakamoto_tenure_events_by_block_and_consensus_hashes ON nakamoto_tenure_events(tenure_id_consensus_hash,block_hash);
+  CREATE INDEX nakamoto_tenure_events_by_burn_view_consensus_hash ON nakamoto_tenure_events(burn_view_consensus_hash);
+  CREATE INDEX nakamoto_tenure_events_by_parent ON nakamoto_tenure_events(tenure_id_consensus_hash,prev_tenure_id_consensus_hash);
+"#;
+
+/// Primary key into nakamoto_tenure_events.
+/// Used for MARF lookups
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct NakamotoTenureEventId {
+    /// last sortition in this tenure
+    pub burn_view_consensus_hash: ConsensusHash,
+    /// start block ID of this tenure
+    pub block_id: StacksBlockId,
+}
+
+/// Nakamoto tenure event. Something happened to the tenure stream, and this struct encodes it (be
+/// it a new tenure was started, or the current tenure was extended).
 #[derive(Debug, Clone, PartialEq)]
-pub struct NakamotoTenure {
+pub struct NakamotoTenureEvent {
     /// consensus hash of start-tenure block
     pub tenure_id_consensus_hash: ConsensusHash,
     /// consensus hash of parent tenure's start-tenure block
@@ -170,17 +267,14 @@ pub struct NakamotoTenure {
     pub block_hash: BlockHeaderHash,
     /// block ID of this start block
     pub block_id: StacksBlockId,
-    /// number of sortition-tenures so far, including this one.
-    /// This is, equivalently, the number of coinbases emitted so far.
+ /// coinbase height of this tenure pub coinbase_height: u64, - /// number of tenure-change transactions so far, including this one - pub tenure_index: u64, /// number of blocks this tenure confirms pub num_blocks_confirmed: u32, } -impl FromRow for NakamotoTenure { - fn from_row(row: &rusqlite::Row) -> Result { +impl FromRow for NakamotoTenureEvent { + fn from_row(row: &rusqlite::Row) -> Result { let tenure_id_consensus_hash = row.get("tenure_id_consensus_hash")?; let prev_tenure_id_consensus_hash = row.get("prev_tenure_id_consensus_hash")?; let burn_view_consensus_hash = row.get("burn_view_consensus_hash")?; @@ -189,15 +283,10 @@ impl FromRow for NakamotoTenure { let block_hash = row.get("block_hash")?; let block_id = row.get("block_id")?; let coinbase_height_i64: i64 = row.get("coinbase_height")?; - let coinbase_height = coinbase_height_i64 - .try_into() - .map_err(|_| DBError::ParseError)?; - let tenure_index_i64: i64 = row.get("tenure_index")?; - let tenure_index = tenure_index_i64 - .try_into() - .map_err(|_| DBError::ParseError)?; + let coinbase_height = + u64::try_from(coinbase_height_i64).map_err(|_| DBError::ParseError)?; let num_blocks_confirmed: u32 = row.get("num_blocks_confirmed")?; - Ok(NakamotoTenure { + Ok(NakamotoTenureEvent { tenure_id_consensus_hash, prev_tenure_id_consensus_hash, burn_view_consensus_hash, @@ -205,7 +294,6 @@ impl FromRow for NakamotoTenure { block_hash, block_id, coinbase_height, - tenure_index, num_blocks_confirmed, }) } @@ -346,88 +434,48 @@ impl NakamotoChainState { Ok(matured_miner_rewards_opt) } - /// Return the coinbase height of `block` if it was a nakamoto block, or the - /// Stacks block height of `block` if it was an epoch-2 block - /// - /// In Stacks 2.x, the coinbase height and block height are the - /// same. A miner's tenure in Stacks 2.x is entirely encompassed - /// in the single Bitcoin-anchored Stacks block they produce, as - /// well as the microblock stream they append to it. But in Nakamoto, - /// the coinbase height and block height are decoupled. - pub fn get_coinbase_height( - chainstate_conn: &Connection, - block: &StacksBlockId, - ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result: Option = - query_row_panic(chainstate_conn, sql, &[&block], || { - "FATAL: multiple rows for the same block hash".to_string() - })?; - if let Some(nak_hdr) = result { - let nak_qry = "SELECT coinbase_height FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let opt_height: Option = chainstate_conn - .query_row(nak_qry, &[&nak_hdr.consensus_hash], |row| row.get(0)) - .optional()?; - if let Some(height) = opt_height { - return Ok(Some( - u64::try_from(height).map_err(|_| DBError::ParseError)?, - )); - } else { - // should be unreachable - return Err(DBError::NotFoundError.into()); - } - } - - let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?1"; - let opt_height: Option = chainstate_conn - .query_row(epoch_2_qry, &[block], |row| row.get(0)) - .optional()?; - opt_height - .map(u64::try_from) - .transpose() - .map_err(|_| ChainstateError::DBError(DBError::ParseError)) - } - /// Determine if a tenure has been fully processed. - pub fn has_processed_nakamoto_tenure( - conn: &Connection, + /// That is, we've processed both its tenure-start block, and we've processed a tenure-change that + /// claims this tenure as its parent tenure. 
+    ///
+    /// If we haven't processed a tenure-start block for this tenure, then return false.
+    pub fn has_processed_nakamoto_tenure<SDBI: StacksDBIndexed>(
+        conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
         tenure_id_consensus_hash: &ConsensusHash,
     ) -> Result<bool, ChainstateError> {
-        // a tenure will have been processed if any of its children have been processed
-        let sql = "SELECT 1 FROM nakamoto_tenures WHERE prev_tenure_id_consensus_hash = ?1 LIMIT 1";
-        let args: &[&dyn ToSql] = &[tenure_id_consensus_hash];
-        let found: Option<i64> = query_row(conn, sql, args)?;
-        Ok(found.is_some())
+        Ok(conn
+            .is_tenure_finished(tip_block_id, tenure_id_consensus_hash)?
+            .unwrap_or(false))
     }
 
     /// Insert a nakamoto tenure.
+    /// `block_header` is the header of the block containing `tenure`.
     /// No validation will be done.
     pub(crate) fn insert_nakamoto_tenure(
         tx: &Connection,
         block_header: &NakamotoBlockHeader,
         coinbase_height: u64,
-        tenure_index: u64,
         tenure: &TenureChangePayload,
     ) -> Result<(), ChainstateError> {
         // NOTE: this is checked with check_nakamoto_tenure()
         assert_eq!(block_header.consensus_hash, tenure.tenure_consensus_hash);
 
-        let args: &[&dyn ToSql] = &[
-            &tenure.tenure_consensus_hash,
-            &tenure.prev_tenure_consensus_hash,
-            &tenure.burn_view_consensus_hash,
-            &tenure.cause.as_u8(),
-            &block_header.block_hash(),
-            &block_header.block_id(),
-            &u64_to_sql(coinbase_height)?,
-            &u64_to_sql(tenure_index)?,
-            &tenure.previous_tenure_blocks,
+        let args = params![
+            tenure.tenure_consensus_hash,
+            tenure.prev_tenure_consensus_hash,
+            tenure.burn_view_consensus_hash,
+            tenure.cause.as_u8(),
+            block_header.block_hash(),
+            block_header.block_id(),
+            u64_to_sql(coinbase_height)?,
+            tenure.previous_tenure_blocks,
         ];
         tx.execute(
-            "INSERT INTO nakamoto_tenures
+            "INSERT INTO nakamoto_tenure_events
                 (tenure_id_consensus_hash, prev_tenure_id_consensus_hash, burn_view_consensus_hash, cause,
-                block_hash, block_id, coinbase_height, tenure_index, num_blocks_confirmed)
+                block_hash, block_id, coinbase_height, num_blocks_confirmed)
             VALUES
-                (?1,?2,?3,?4,?5,?6,?7,?8,?9)",
+                (?1,?2,?3,?4,?5,?6,?7,?8)",
             args,
         )?;
 
@@ -442,123 +490,83 @@ impl NakamotoChainState {
         ch: &ConsensusHash,
     ) -> Result<(), ChainstateError> {
         tx.execute(
-            "DELETE FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1",
+            "DELETE FROM nakamoto_tenure_events WHERE tenure_id_consensus_hash = ?1",
             &[ch],
         )?;
         Ok(())
     }
 
-    /// Get the first block header in a Nakamoto tenure
-    pub fn get_nakamoto_tenure_start_block_header(
-        chainstate_conn: &Connection,
-        consensus_hash: &ConsensusHash,
-    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
-        let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height ASC LIMIT 1";
-        query_row_panic(chainstate_conn, sql, &[&consensus_hash], || {
-            "FATAL: multiple rows for the same consensus hash".to_string()
-        })
-        .map_err(ChainstateError::DBError)
-    }
-
     /// Get the consensus hash of the parent tenure
     /// Used by the p2p code.
     /// Don't use in consensus code.
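+    ///
+    /// Illustrative usage (a hedged sketch; `chainstate_conn`, `tip_block_id`, and
+    /// `consensus_hash` are assumed caller-side bindings, and the snippet is not
+    /// compiled here):
+    /// ```ignore
+    /// let parent_ch_opt = NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash(
+    ///     &mut chainstate_conn,
+    ///     &tip_block_id,
+    ///     &consensus_hash,
+    /// )?;
+    /// ```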
-    pub fn get_nakamoto_parent_tenure_id_consensus_hash(
-        chainstate_conn: &Connection,
+    pub fn get_nakamoto_parent_tenure_id_consensus_hash<SDBI: StacksDBIndexed>(
+        chainstate_conn: &mut SDBI,
+        tip_block_id: &StacksBlockId,
         consensus_hash: &ConsensusHash,
     ) -> Result<Option<ConsensusHash>, ChainstateError> {
-        let sql = "SELECT prev_tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1";
-        let args: &[&dyn ToSql] = &[consensus_hash];
-        query_row(chainstate_conn, sql, args).map_err(ChainstateError::DBError)
-    }
-
-    /// Get the last block header in a Nakamoto tenure
-    pub fn get_nakamoto_tenure_finish_block_header(
-        chainstate_conn: &Connection,
-        consensus_hash: &ConsensusHash,
-    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
-        let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC LIMIT 1";
-        query_row_panic(chainstate_conn, sql, &[&consensus_hash], || {
-            "FATAL: multiple rows for the same consensus hash".to_string()
-        })
-        .map_err(ChainstateError::DBError)
+        Ok(chainstate_conn.get_parent_tenure_consensus_hash(tip_block_id, consensus_hash)?)
     }
 
-    /// Get the number of blocks in a tenure.
+    /// Get the number of blocks in a tenure, given a block ID.
     /// Only works for Nakamoto blocks, not Stacks epoch2 blocks.
-    /// Returns 0 if the consensus hash is not found.
+    /// Returns 0 if there are no blocks in this tenure
     pub fn get_nakamoto_tenure_length(
         chainstate_conn: &Connection,
-        consensus_hash: &ConsensusHash,
+        block_id: &StacksBlockId,
     ) -> Result<u32, ChainstateError> {
-        let sql = "SELECT IFNULL(COUNT(block_hash),0) FROM nakamoto_block_headers WHERE consensus_hash = ?1";
-        let count_i64 = query_int(chainstate_conn, sql, &[&consensus_hash])?;
-        let count: u32 = count_i64
-            .try_into()
-            .expect("FATAL: too many blocks in tenure");
-        Ok(count)
-    }
-
-    /// Get the highest coinbase height processed.
-    /// Returns Ok(Some(coinbase_height)) if we have processed at least one tenure
-    /// Returns Ok(None) if we have not yet processed a Nakamoto tenure
-    /// Returns Err(..) on database errors
-    pub fn get_highest_nakamoto_coinbase_height(
-        conn: &Connection,
-        max: u64,
-    ) -> Result<Option<u64>, ChainstateError> {
-        match conn
-            .query_row(
-                "SELECT IFNULL(MAX(coinbase_height), 0) FROM nakamoto_tenures WHERE coinbase_height < ?1",
-                &[&u64_to_sql(max)?],
-                |row| Ok(u64::from_row(row).expect("Expected u64 in database")),
-            )
-            .optional()?
-        {
-            Some(0) => {
-                // this never happens, so it's None
-                Ok(None)
+        // at least one block in this tenure
+        let sql = "SELECT height_in_tenure FROM nakamoto_block_headers WHERE index_block_hash = ?1";
+        let count = match query_int(chainstate_conn, sql, &[block_id]) {
+            Ok(count_i64) => {
+                let count: u32 = count_i64
+                    .try_into()
+                    .expect("FATAL: too many blocks in tenure");
+                count
             }
-            Some(height_i64) => {
-                Ok(Some(
-                    height_i64.try_into().map_err(|_| DBError::ParseError)?,
-                ))
+            Err(DBError::NotFoundError) => 0,
+            Err(e) => {
+                return Err(e.into());
             }
-            None => Ok(None),
-        }
+        };
+        Ok(count)
     }
 
-    /// Get a nakamoto tenure-change by its tenure ID consensus hash.
-    /// Get the highest such record. It will be the last-processed BlockFound tenure
-    /// for the given sortition consensus hash.
- pub fn get_highest_nakamoto_tenure_change_by_tenure_id( + /// Get a Nakamoto tenure change by its ID + pub fn get_nakamoto_tenure_change( headers_conn: &Connection, - tenure_id_consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 AND cause = ?2 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[ - tenure_id_consensus_hash, - &TenureChangeCause::BlockFound.as_u8(), - ]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) + tenure_id: &NakamotoTenureEventId, + ) -> Result, ChainstateError> { + let sql = + "SELECT * FROM nakamoto_tenure_events WHERE burn_view_consensus_hash = ?1 AND block_id = ?2"; + let args = rusqlite::params![tenure_id.burn_view_consensus_hash, tenure_id.block_id]; + Ok(query_row(headers_conn, sql, args)?) } - /// Get the highest processed tenure on the canonical sortition history. - pub fn get_highest_nakamoto_tenure( - headers_conn: &Connection, - sortdb_conn: &Connection, - ) -> Result, ChainstateError> { - // find the tenure for the Stacks chain tip - let (tip_ch, tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb_conn)?; - if tip_ch == FIRST_BURNCHAIN_CONSENSUS_HASH || tip_bhh == FIRST_STACKS_BLOCK_HASH { - // no chain tip, so no tenure + /// Get the tenure-change most recently processed in the history tipped by the given block. + /// This can be a block-found or an extended tenure change. + /// Returns None if this tip is an epoch2x block ID + pub fn get_ongoing_tenure( + headers_conn: &mut SDBI, + tip_block_id: &StacksBlockId, + ) -> Result, ChainstateError> { + let Some(tenure_id) = headers_conn.get_ongoing_tenure_id(tip_block_id)? else { return Ok(None); - } - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&tip_ch]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) + }; + Self::get_nakamoto_tenure_change(headers_conn.sqlite(), &tenure_id) + } + + /// Get the block-found tenure-change for a given tenure ID consensus hash + pub fn get_block_found_tenure( + headers_conn: &mut SDBI, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let Some(tenure_id) = + headers_conn.get_block_found_tenure_id(tip_block_id, tenure_id_consensus_hash)? + else { + return Ok(None); + }; + Self::get_nakamoto_tenure_change(headers_conn.sqlite(), &tenure_id) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an @@ -569,7 +577,7 @@ impl NakamotoChainState { pub(crate) fn check_first_nakamoto_tenure_change( headers_conn: &Connection, tenure_payload: &TenureChangePayload, - ) -> Result, ChainstateError> { + ) -> Result, ChainstateError> { // must be a tenure-change if !tenure_payload.cause.expects_sortition() { warn!("Invalid tenure-change: not a sortition-induced tenure-change"; @@ -582,19 +590,26 @@ impl NakamotoChainState { let Some(parent_header) = Self::get_block_header(headers_conn, &tenure_payload.previous_tenure_end)? 
else { - warn!("Invalid tenure-change: no parent epoch2 header"; + warn!("Invalid tenure-change from epoch2: no parent epoch2 header"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, "previous_tenure_end" => %tenure_payload.previous_tenure_end ); return Ok(None); }; if tenure_payload.previous_tenure_blocks != 1 { - warn!("Invalid tenure-change: expected 1 previous tenure block"; + warn!("Invalid tenure-change from epoch2: expected 1 previous tenure block"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, "previous_tenure_blocks" => %tenure_payload.previous_tenure_blocks ); return Ok(None); } + if tenure_payload.prev_tenure_consensus_hash != parent_header.consensus_hash { + warn!("Invalid tenure-change from epoch2: parent tenure consensus hash mismatch"; + "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, + "parent_header.consensus_hash" => %parent_header.consensus_hash + ); + return Ok(None); + } let Some(epoch2_header_info) = parent_header.anchored_header.as_stacks_epoch2() else { warn!("Invalid tenure-change: parent header is not epoch2"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, @@ -604,7 +619,7 @@ impl NakamotoChainState { }; // synthesize the "last epoch2" tenure info, so we can calculate the first nakamoto tenure - let last_epoch2_tenure = NakamotoTenure { + let last_epoch2_tenure = NakamotoTenureEvent { tenure_id_consensus_hash: parent_header.consensus_hash.clone(), prev_tenure_id_consensus_hash: ConsensusHash([0x00; 20]), // ignored, burn_view_consensus_hash: parent_header.consensus_hash.clone(), @@ -615,8 +630,6 @@ impl NakamotoChainState { &epoch2_header_info.block_hash(), ), coinbase_height: epoch2_header_info.total_work.work, - // NOTE: first Nakamoto tenure and tenure index will have height 1 - tenure_index: 0, num_blocks_confirmed: 1, }; Ok(Some(last_epoch2_tenure)) @@ -655,15 +668,18 @@ impl NakamotoChainState { /// * previous_tenure_blocks /// * cause /// - /// Returns Ok(Some(highest-processed-tenure)) on success + /// `block_header` is the block header of a tenure-change block, which includes + /// `tenure_payload` as its first transaction. + /// + /// Returns Ok(Some(processed-tenure)) on success /// Returns Ok(None) if the tenure change is invalid /// Returns Err(..) on DB error - pub(crate) fn check_nakamoto_tenure( - headers_conn: &Connection, + pub(crate) fn check_nakamoto_tenure( + headers_conn: &mut SDBI, sort_handle: &mut SH, block_header: &NakamotoBlockHeader, tenure_payload: &TenureChangePayload, - ) -> Result, ChainstateError> { + ) -> Result, ChainstateError> { // block header must match this tenure if block_header.consensus_hash != tenure_payload.tenure_consensus_hash { warn!("Invalid tenure-change (or block) -- mismatched consensus hash"; @@ -673,7 +689,16 @@ impl NakamotoChainState { return Ok(None); } - // all consensus hashes must be on the canonical fork, if they're not the first-ever + // this tenure_payload must point to the parent block + if tenure_payload.previous_tenure_end != block_header.parent_block_id { + warn!("Invalid tenure-change: does not confirm parent block"; + "previous_tenure_end" => %tenure_payload.previous_tenure_end, + "parent_block_id" => %block_header.parent_block_id + ); + return Ok(None); + } + + // all consensus hashes must be on the canonical burnchain fork, if they're not the first-ever let Some(tenure_sn) = Self::check_valid_consensus_hash(sort_handle, &tenure_payload.tenure_consensus_hash)? 
else { @@ -689,7 +714,7 @@ impl NakamotoChainState { // tenure_sn must be no more recent than sortition_sn if tenure_sn.block_height > sortition_sn.block_height { - warn!("Invalid tenure-change: tenure snapshot comes sortition snapshot"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, "burn_view_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); + warn!("Invalid tenure-change: tenure snapshot comes before sortition snapshot"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, "burn_view_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); return Ok(None); } @@ -742,17 +767,34 @@ impl NakamotoChainState { return Ok(None); } - let Some(highest_processed_tenure) = - Self::get_highest_nakamoto_tenure(headers_conn, sort_handle.sqlite())? + // What tenure are we building off of? This is the tenure in which the parent block + // resides. Note that if this block is a tenure-extend block, then parent_block_id and + // this block reside in the same tenure (but this block will insert a tenure-extend record + // into the tenure-changes table). + let Some(parent_tenure) = + Self::get_ongoing_tenure(headers_conn, &block_header.parent_block_id)? else { - // no previous tenures. This is the first tenure change. It should point to an epoch + // not building off of a previous Nakamoto tenure. This is the first tenure change. It should point to an epoch // 2.x block. - return Self::check_first_nakamoto_tenure_change(headers_conn, tenure_payload); + return Self::check_first_nakamoto_tenure_change(headers_conn.sqlite(), tenure_payload); }; // validate cause match tenure_payload.cause { - TenureChangeCause::BlockFound => {} + TenureChangeCause::BlockFound => { + // this tenure_payload's prev_consensus_hash must match the parent block tenure's + // tenure_consensus_hash -- i.e. this tenure must be distinct from the parent + // block's tenure + if parent_tenure.tenure_id_consensus_hash + != tenure_payload.prev_tenure_consensus_hash + { + warn!("Invalid tenure-change: tenure block-found does not confirm parent block's tenure"; + "parent_tenure.tenure_consensus_hash" => %parent_tenure.tenure_id_consensus_hash, + "prev_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash + ); + return Ok(None); + } + } TenureChangeCause::Extended => { // tenure extensions don't begin a new tenure (since the miner isn't changing), so // the tenure consensus hash must be the same as the previous tenure consensus hash @@ -764,90 +806,39 @@ impl NakamotoChainState { ); return Ok(None); } - if tenure_payload.burn_view_consensus_hash - == highest_processed_tenure.burn_view_consensus_hash - { - // if we're extending tenure within the same sortition, then the tenure and - // prev_tenure consensus hashes must match that of the highest. 
- if highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.tenure_consensus_hash - || highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.prev_tenure_consensus_hash - { - warn!("Invalid tenure-change: tenure extension within the same sortition tries to override the highest sortition"; - "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - "highest_processed_tenure.prev_consensus_hash" => %highest_processed_tenure.prev_tenure_id_consensus_hash - ); - return Ok(None); - } - } } - } - - let Some(last_tenure_finish_block_id) = Self::get_nakamoto_tenure_finish_block_header( - headers_conn, - &highest_processed_tenure.tenure_id_consensus_hash, - )? - .map(|hdr| hdr.index_block_hash()) else { - // last tenure doesn't exist (should be unreachable) - warn!("Invalid tenure-change: no blocks found for highest processed tenure"; - "consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - ); - return Ok(None); }; - // must build atop the highest-processed tenure. - // NOTE: for tenure-extensions, the second check is always false, since the tenure and - // prev-tenure consensus hashes must be the same per the above check. - if last_tenure_finish_block_id != tenure_payload.previous_tenure_end - || highest_processed_tenure.tenure_id_consensus_hash - != tenure_payload.prev_tenure_consensus_hash - { - // not continuous -- this tenure-change does not point to the end of the - // last-processed tenure, or does not point to the last-processed tenure's sortition - warn!("Invalid tenure-change: discontiguous"; - "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, - "last_tenure_finish_block_id" => %last_tenure_finish_block_id, - "tenure_payload.previous_tenure_end" => %tenure_payload.previous_tenure_end - ); - return Ok(None); - } - - // The tenure-change must report the number of blocks _so far_ in the current tenure. If - // there is a succession of tenure-extensions for a given tenure, then the reported tenure + // The tenure-change must report the number of blocks _so far_ in the previous tenure (note if this is a TenureChangeCause::Extended, then its parent tenure will be its own tenure). + // If there is a succession of tenure-extensions for a given tenure, then the reported tenure // length must report the number of blocks since the last _sortition-induced_ tenure // change. 
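+        // Worked example (hypothetical numbers): if the parent tenure began with a
+        // BlockFound tenure-change in block B1 and now ends at B5, a tenure-extend
+        // built on B5 must report previous_tenure_blocks = 5, counting from B1 even
+        // if an earlier extension was already processed at, say, B3.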
- let tenure_len = Self::get_nakamoto_tenure_length( - headers_conn, - &highest_processed_tenure.tenure_id_consensus_hash, - )?; + let tenure_len = + Self::get_nakamoto_tenure_length(headers_conn.sqlite(), &block_header.parent_block_id)?; + if tenure_len != tenure_payload.previous_tenure_blocks { // invalid -- does not report the correct number of blocks in the past tenure warn!("Invalid tenure-change: wrong number of blocks"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, - "highest_processed_tenure.consensus_hash" => %highest_processed_tenure.tenure_id_consensus_hash, + "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, "tenure_len" => tenure_len, "tenure_payload.previous_tenure_blocks" => tenure_payload.previous_tenure_blocks ); return Ok(None); } - Ok(Some(highest_processed_tenure)) + Ok(Some(parent_tenure)) } /// Advance the tenures table with a validated block's tenure data. /// This applies to both tenure-changes and tenure-extends. - /// Returns the highest tenure-change height (this is parent_coinbase_height + 1 if there was a + /// Returns the tenure-change height (this is parent_coinbase_height + 1 if there was a /// tenure-change tx, or just parent_coinbase_height if there was a tenure-extend tx or no tenure /// txs at all). /// TODO: unit test - pub(crate) fn advance_nakamoto_tenure( + pub(crate) fn advance_nakamoto_tenure( headers_tx: &mut StacksDBTx, - sort_tx: &mut SortitionHandleTx, + handle: &mut SH, block: &NakamotoBlock, parent_coinbase_height: u64, ) -> Result { @@ -869,53 +860,57 @@ impl NakamotoChainState { } }; - let Some(highest_processed_tenure) = - Self::check_nakamoto_tenure(headers_tx, sort_tx, &block.header, tenure_payload)? - else { + if Self::check_nakamoto_tenure(headers_tx, handle, &block.header, tenure_payload)?.is_none() + { return Err(ChainstateError::InvalidStacksTransaction( "Invalid tenure tx".into(), false, )); }; - Self::insert_nakamoto_tenure( - headers_tx, - &block.header, - coinbase_height, - highest_processed_tenure - .tenure_index - .checked_add(1) - .expect("too many tenure-changes"), - tenure_payload, - )?; + Self::insert_nakamoto_tenure(headers_tx, &block.header, coinbase_height, tenure_payload)?; return Ok(coinbase_height); } /// Check that this block is in the same tenure as its parent, and that this tenure is the - /// highest-seen tenure. Use this to check blocks that do _not_ have tenure-changes. + /// highest-seen tenure. Use this to check blocks that do _not_ have BlockFound tenure-changes. + /// + /// `parent_ch` is the tenure ID consensus hash of the given block's parent. /// /// Returns Ok(bool) to indicate whether or not this block is in the same tenure as its parent. /// Returns Err(..) on DB error - pub(crate) fn check_tenure_continuity( - headers_conn: &Connection, - sortdb_conn: &Connection, + pub(crate) fn check_tenure_continuity( + headers_conn: &mut SDBI, parent_ch: &ConsensusHash, block_header: &NakamotoBlockHeader, ) -> Result { // block must have the same consensus hash as its parent if block_header.is_first_mined() || parent_ch != &block_header.consensus_hash { + test_debug!("Block is discontinuous with tenure: either first-mined or has a different tenure ID"; + "parent_ch" => %parent_ch, + "block_header.consensus_hash" => %block_header.consensus_hash, + "is_first_mined()" => block_header.is_first_mined(), + ); return Ok(false); } // block must be in the same tenure as the highest-processed tenure. 
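+        // Illustrative note: if the parent block sits in tenure T, this block is
+        // continuous only when its own consensus hash is also T's tenure ID; a block
+        // that instead begins a new tenure must carry a BlockFound tenure-change and
+        // is validated by check_nakamoto_tenure() instead.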
- let Some(highest_tenure) = Self::get_highest_nakamoto_tenure(headers_conn, sortdb_conn)? + let Some(highest_tenure) = + Self::get_ongoing_tenure(headers_conn, &block_header.parent_block_id)? else { // no tenures yet, so definitely not continuous + test_debug!("Block is discontinuous with tenure: no ongoing tenure"; + "block_header.parent_block_id" => %block_header.parent_block_id, + ); return Ok(false); }; if &highest_tenure.tenure_id_consensus_hash != parent_ch { // this block is not in the highest-known tenure, so it can't be continuous + test_debug!("Block is discontinuous with tenure: parent is not in current tenure"; + "parent_ch" => %parent_ch, + "highest_tenure.tenure_id_consensus_hash" => %highest_tenure.tenure_id_consensus_hash, + ); return Ok(false); } @@ -943,7 +938,7 @@ impl NakamotoChainState { /// TODO: unit test pub(crate) fn calculate_scheduled_tenure_reward( chainstate_tx: &mut ChainstateTx, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block: &NakamotoBlock, evaluated_epoch: StacksEpochId, parent_coinbase_height: u64, @@ -956,7 +951,7 @@ impl NakamotoChainState { // figure out if there any accumulated rewards by // getting the snapshot that elected this block. let accumulated_rewards = SortitionDB::get_block_snapshot_consensus( - burn_dbconn.tx(), + burn_dbconn.conn(), &block.header.consensus_hash, )? .expect("CORRUPTION: failed to load snapshot that elected processed block") @@ -977,8 +972,10 @@ impl NakamotoChainState { warn!("While processing tenure change, failed to look up parent tenure"; "parent_coinbase_height" => parent_coinbase_height, "parent_block_id" => %block.header.parent_block_id, - "block_hash" => %block.header.block_hash(), - "block_consensus_hash" => %block.header.consensus_hash); + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); ChainstateError::NoSuchBlockError })?; // fetch the parent tenure fees by reading the total tx fees from this block's @@ -991,8 +988,10 @@ impl NakamotoChainState { )?.ok_or_else(|| { warn!("While processing tenure change, failed to look up parent block's total tx fees"; "parent_block_id" => %block.header.parent_block_id, - "block_hash" => %block.header.block_hash(), - "block_consensus_hash" => %block.header.consensus_hash); + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); ChainstateError::NoSuchBlockError })? } else { @@ -1028,7 +1027,7 @@ impl NakamotoChainState { /// particular burnchain fork. /// Return the block snapshot if so. pub(crate) fn check_sortition_exists( - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block_consensus_hash: &ConsensusHash, ) -> Result { // check that the burnchain block that this block is associated with has been processed. @@ -1044,9 +1043,8 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; - let sortition_tip = burn_dbconn.context.chain_tip.clone(); let snapshot = burn_dbconn - .get_block_snapshot(&burn_header_hash, &sortition_tip)? + .get_block_snapshot(&burn_header_hash)? 
.ok_or_else(|| { warn!( "Tried to process Nakamoto block before its burn view was processed"; diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 30a1ba81208..13d7f2ff1ed 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -19,6 +19,8 @@ use std::collections::{HashSet, VecDeque}; use std::path::{Path, PathBuf}; use std::{fs, io}; +use clarity::util::hash::MerkleHashFunc; +use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; @@ -28,13 +30,16 @@ use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, +}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use wsts::curve::point::Point; use wsts::traits::Aggregator; +use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; @@ -45,10 +50,10 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::{NakamotoSignerEntry, PoxStartCycleInfo}; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{ @@ -77,6 +82,8 @@ pub struct TestSigners { pub party_key_ids: Vec>, /// The cycle for which the signers are valid pub cycle: u64, + /// The signer's private keys + pub signer_keys: Vec, } impl Default for TestSigners { @@ -104,6 +111,11 @@ impl Default for TestSigners { }) .collect(); + let mut signer_keys = Vec::::new(); + for _ in 0..num_keys { + signer_keys.push(Secp256k1PrivateKey::default()); + } + // Generate an aggregate public key let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { Ok(poly_commitments) => poly_commitments, @@ -124,18 +136,153 @@ impl Default for TestSigners { threshold, party_key_ids, cycle: 0, + signer_keys, } } } impl TestSigners { + /// Generate TestSigners using a list of signer keys + pub fn new(signer_keys: Vec) -> Self { + TestSigners::default_with_signers(signer_keys) + } + + /// Internal function to generate aggregate key information + fn default_with_signers(signer_keys: Vec) -> Self { + let mut rng = rand_core::OsRng::default(); + let num_keys = 10; + let threshold = 7; + let party_key_ids: Vec> = + vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; + let num_parties = party_key_ids.len().try_into().unwrap(); + + // Create the parties + let mut signer_parties: Vec = party_key_ids + .iter() + .enumerate() + .map(|(pid, pkids)| { + wsts::v2::Party::new( + pid.try_into().unwrap(), + pkids, + num_parties, 
+ num_keys, + threshold, + &mut rng, + ) + }) + .collect(); + + // Generate an aggregate public key + let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { + Ok(poly_commitments) => poly_commitments, + Err(secret_errors) => { + panic!("Got secret errors from DKG: {:?}", secret_errors); + } + }; + let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); + sig_aggregator + .init(&poly_commitments) + .expect("aggregator init failed"); + let aggregate_public_key = sig_aggregator.poly[0]; + Self { + signer_parties, + aggregate_public_key, + poly_commitments, + num_keys, + threshold, + party_key_ids, + cycle: 0, + signer_keys, + } + } + + /// Sign a Nakamoto block using [`Self::signer_keys`]. + /// + /// N.B. If any of [`Self::signer_keys`] are not in the reward set, the resulting + /// signatures will be invalid. Use [`Self::sign_block_with_reward_set()`] to ensure + /// that any signer keys not in the reward set are not included. pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock, cycle: u64) { // Update the aggregate public key if the cycle has changed if self.cycle != cycle { self.generate_aggregate_key(cycle); } - let mut rng = rand_core::OsRng; + let signer_signature = self.generate_block_signatures(&block); + + test_debug!( + "Signed Nakamoto block {} with {} signatures (rc {})", + block.block_id(), + signer_signature.len(), + cycle + ); + block.header.signer_signature = signer_signature; + } + + /// Sign a NakamotoBlock and maintain the order and membership + /// of the reward set signers in the resulting signatures. + /// + /// If any of [`Self::signer_keys`] are not in the reward set, their signatures + /// will not be included. + pub fn sign_block_with_reward_set(&self, block: &mut NakamotoBlock, reward_set: &RewardSet) { + let signatures = self.generate_ordered_signatures(block, reward_set); + block.header.signer_signature = signatures; + } + + /// Synthesize a reward set from the signer for the purposes of signing and verifying blocks + /// later on + pub fn synthesize_reward_set(&self) -> RewardSet { + let mut signer_entries = vec![]; + let mut pox_addrs = vec![]; + for key in self.signer_keys.iter() { + let signing_key_vec = Secp256k1PublicKey::from_private(key).to_bytes_compressed(); + let mut signing_key = [0u8; 33]; + signing_key[0..33].copy_from_slice(&signing_key_vec[0..33]); + + let nakamoto_signer_entry = NakamotoSignerEntry { + signing_key, + stacked_amt: 100_000_000_000, + weight: 1, + }; + let pox_addr = PoxAddress::Standard( + StacksAddress { + version: AddressHashMode::SerializeP2PKH.to_version_testnet(), + bytes: Hash160::from_data(&nakamoto_signer_entry.signing_key), + }, + Some(AddressHashMode::SerializeP2PKH), + ); + signer_entries.push(nakamoto_signer_entry); + pox_addrs.push(pox_addr); + } + + RewardSet { + rewarded_addresses: pox_addrs, + start_cycle_state: PoxStartCycleInfo { + missed_reward_slots: vec![], + }, + signers: Some(signer_entries), + pox_ustx_threshold: Some(100_000_000_000), + } + } + + /// Sign a Nakamoto block and generate a vec of signatures. The signatures will + /// be ordered by the signer's public keys, but will not be checked against the + /// reward set. 
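+    ///
+    /// Illustrative doc example (a hedged sketch; `signers` and `block` are assumed
+    /// bindings, and the snippet is not compiled here):
+    /// ```ignore
+    /// let sigs = signers.generate_block_signatures(&block);
+    /// // one signature per entry in `signer_keys`, ordered by compressed public key
+    /// assert_eq!(sigs.len(), signers.signer_keys.len());
+    /// ```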
+    fn generate_block_signatures(&self, block: &NakamotoBlock) -> Vec<MessageSignature> {
+        let msg = block.header.signer_signature_hash().0;
+        let mut keys = self.signer_keys.clone();
+        keys.sort_by(|a, b| {
+            let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed();
+            let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed();
+            a.cmp(&b)
+        });
+        keys.iter().map(|key| key.sign(&msg).unwrap()).collect()
+    }
+
+    /// Sign a Nakamoto block using the aggregate key.
+    /// NB: this function is currently unused.
+    #[allow(dead_code)]
+    fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature {
+        let mut rng = rand_core::OsRng::default();
         let msg = block.header.signer_signature_hash().0;
         let (nonces, sig_shares, key_ids) =
             wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng);
@@ -147,14 +294,56 @@ impl TestSigners {
         let signature = sig_aggregator
             .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids)
             .expect("aggregator sig failed");
+        ThresholdSignature(signature)
+    }
 
-        test_debug!(
-            "Signed Nakamoto block {} with {} (rc {})",
-            block.block_id(),
-            &self.aggregate_public_key,
-            cycle
-        );
-        block.header.signer_signature = ThresholdSignature(signature);
+    /// Generate a list of signatures for a block. Only
+    /// signers in the reward set will be included.
+    pub fn generate_ordered_signatures(
+        &self,
+        block: &NakamotoBlock,
+        reward_set: &RewardSet,
+    ) -> Vec<MessageSignature> {
+        let msg = block.header.signer_signature_hash().0;
+
+        let test_signers_by_pk = self
+            .signer_keys
+            .iter()
+            .cloned()
+            .map(|s| {
+                let pk = Secp256k1PublicKey::from_private(&s);
+                (pk.to_bytes_compressed(), s)
+            })
+            .collect::<HashMap<_, _>>();
+
+        let reward_set_keys = &reward_set
+            .clone()
+            .signers
+            .unwrap()
+            .iter()
+            .map(|s| s.signing_key.to_vec())
+            .collect::<Vec<_>>();
+
+        let mut signatures = Vec::with_capacity(reward_set_keys.len());
+
+        let mut missing_keys = 0;
+
+        for key in reward_set_keys {
+            if let Some(signer_key) = test_signers_by_pk.get(key) {
+                let signature = signer_key.sign(&msg).unwrap();
+                signatures.push(signature);
+            } else {
+                missing_keys += 1;
+            }
+        }
+        if missing_keys > 0 {
+            warn!(
+                "TestSigners: {} keys are in the reward set but not in signer_keys",
+                missing_keys
+            );
+        }
+
+        signatures
     }
 
     // Generate and assign a new aggregate public key
diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs
index 9844fa7b74f..722cfa541af 100644
--- a/stackslib/src/chainstate/nakamoto/tests/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs
@@ -19,12 +19,15 @@ use std::collections::HashMap;
 use std::fs;
 
 use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId};
+use clarity::util::secp256k1::Secp256k1PrivateKey;
 use clarity::vm::clarity::ClarityConnection;
 use clarity::vm::costs::ExecutionCost;
 use clarity::vm::types::StacksAddressExtensions;
 use clarity::vm::Value;
+use libstackerdb::StackerDBChunkData;
 use rand::{thread_rng, RngCore};
-use rusqlite::{Connection, ToSql};
+use rusqlite::types::ToSql;
+use rusqlite::{params, Connection};
 use stacks_common::address::AddressHashMode;
 use stacks_common::bitvec::BitVec;
 use stacks_common::codec::StacksMessageCodec;
@@ -60,16 +63,18 @@ use crate::chainstate::coordinator::tests::{
 use crate::chainstate::nakamoto::coordinator::tests::boot_nakamoto;
 use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder;
 use crate::chainstate::nakamoto::signer_set::NakamotoSigners;
-use crate::chainstate::nakamoto::staging_blocks::NakamotoStagingBlocksConnRef;
-use crate::chainstate::nakamoto::tenure::NakamotoTenure;
+use crate::chainstate::nakamoto::staging_blocks::{
+    NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::nakamoto::tenure::NakamotoTenureEvent;
 use crate::chainstate::nakamoto::test_signers::TestSigners;
 use crate::chainstate::nakamoto::tests::node::TestStacker;
 use crate::chainstate::nakamoto::{
-    query_rows, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle,
+    query_row, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle,
     FIRST_STACKS_BLOCK_ID,
 };
 use crate::chainstate::stacks::boot::{
-    MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME,
+    NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME,
 };
 use crate::chainstate::stacks::db::{
     ChainStateBootData, ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName,
@@ -91,19 +96,26 @@ use crate::util_lib::db::Error as db_error;
 use crate::util_lib::strings::StacksString;
 
 impl<'a> NakamotoStagingBlocksConnRef<'a> {
-    #[cfg(test)]
     pub fn get_all_blocks_in_tenure(
         &self,
         tenure_id_consensus_hash: &ConsensusHash,
+        tip: &StacksBlockId,
     ) -> Result<Vec<NakamotoBlock>, ChainstateError> {
-        let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 ORDER BY height ASC";
-        let args: &[&dyn ToSql] = &[tenure_id_consensus_hash];
-        let block_data: Vec<Vec<u8>> = query_rows(self, qry, args)?;
-        let mut blocks = Vec::with_capacity(block_data.len());
-        for data in block_data.into_iter() {
-            let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?;
+        let mut blocks = vec![];
+        let mut cursor = tip.clone();
+        let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1";
+        loop {
+            let Some(block_data): Option<Vec<u8>> = query_row(self, qry, params![cursor])?
else { + break; + }; + let block = NakamotoBlock::consensus_deserialize(&mut block_data.as_slice())?; + if &block.header.consensus_hash != tenure_id_consensus_hash { + break; + } + cursor = block.header.parent_block_id.clone(); blocks.push(block); } + blocks.reverse(); Ok(blocks) } } @@ -125,9 +137,12 @@ pub fn get_account( &tip ); + let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash) + .unwrap() + .unwrap(); chainstate .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle(&snapshot.sortition_id), &tip.index_block_hash(), |clarity_conn| { StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) @@ -152,9 +167,10 @@ fn codec_nakamoto_header() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(8).unwrap(), + signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], + pox_treatment: BitVec::zeros(8).unwrap(), }; let mut bytes = vec![ @@ -172,17 +188,19 @@ fn codec_nakamoto_header() { 0x06, 0x06, // state index root 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, // miner signature - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x07, // timestamp + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // miner signature 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, // stacker signature (mocked) - 0x02, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, - 0x0b, 0x07, 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, - 0xf8, 0x17, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // signatures length + 0x00, 0x00, 0x00, 0x01, // stacker signature (mocked) + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, ]; let signer_bitvec_serialization = "00080000000100"; @@ -202,9 +220,10 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; // sortition-inducing tenure change @@ -555,47 +574,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { ); } -struct MockSortitionHandle { - 
nakamoto_tip: (ConsensusHash, BlockHeaderHash, u64), -} - -impl MockSortitionHandle { - pub fn new(consensus_hash: ConsensusHash, bhh: BlockHeaderHash, height: u64) -> Self { - Self { - nakamoto_tip: (consensus_hash, bhh, height), - } - } -} - -impl SortitionHandle for MockSortitionHandle { - fn get_block_snapshot_by_height( - &mut self, - block_height: u64, - ) -> Result, db_error> { - unimplemented!() - } - - fn first_burn_block_height(&self) -> u64 { - unimplemented!() - } - - fn pox_constants(&self) -> &PoxConstants { - unimplemented!() - } - - fn sqlite(&self) -> &Connection { - unimplemented!() - } - - fn tip(&self) -> SortitionId { - unimplemented!() - } - - fn get_nakamoto_tip(&self) -> Result, db_error> { - Ok(Some(self.nakamoto_tip.clone())) - } -} - +/// Tests for non-MARF'ed block storage #[test] pub fn test_load_store_update_nakamoto_blocks() { let test_name = function_name!(); @@ -674,6 +653,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 100, burn_header_timestamp: 1000, anchored_block_size: 12345, + burn_view: None, }; let epoch2_execution_cost = ExecutionCost { @@ -686,7 +666,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header - prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), + prev_tenure_consensus_hash: epoch2_consensus_hash.clone(), burn_view_consensus_hash: ConsensusHash([0x04; 20]), previous_tenure_end: epoch2_parent_block_id.clone(), previous_tenure_blocks: 1, @@ -721,6 +701,14 @@ pub fn test_load_store_update_nakamoto_blocks() { stx_transfer_tx_3.chain_id = 0x80000000; stx_transfer_tx_3.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut stx_transfer_tx_4 = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + TransactionPayload::TokenTransfer(recipient_addr.into(), 125, TokenTransferMemo([0u8; 34])), + ); + stx_transfer_tx_4.chain_id = 0x80000000; + stx_transfer_tx_4.anchor_mode = TransactionAnchorMode::OnChainOnly; + let nakamoto_txs = vec![tenure_change_tx.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root = { let txid_vecs = nakamoto_txs @@ -751,6 +739,21 @@ pub fn test_load_store_update_nakamoto_blocks() { MerkleTree::::new(&txid_vecs).root() }; + let nakamoto_txs_4 = vec![stx_transfer_tx_4.clone()]; + let nakamoto_tx_merkle_root_4 = { + let txid_vecs = nakamoto_txs_4 + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + + let header_signatures = vec![ + MessageSignature::from_bytes(&[0x01; 65]).unwrap(), + MessageSignature::from_bytes(&[0x02; 65]).unwrap(), + ]; + let nakamoto_header = NakamotoBlockHeader { version: 1, chain_length: 457, @@ -759,9 +762,10 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: epoch2_parent_block_id.clone(), tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: header_signatures.clone(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info = StacksHeaderInfo { @@ -774,6 +778,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header.consensus_hash), }; let epoch2_block = StacksBlock { @@ -803,9 
+808,10 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: nakamoto_header.block_id(), tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info_2 = StacksHeaderInfo { @@ -818,6 +824,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header_2.consensus_hash), }; let nakamoto_block_2 = NakamotoBlock { @@ -842,9 +849,10 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: nakamoto_header_2.block_id(), tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info_3 = StacksHeaderInfo { @@ -857,19 +865,96 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header_3.consensus_hash), }; let nakamoto_block_3 = NakamotoBlock { header: nakamoto_header_3.clone(), - txs: nakamoto_txs_3, + txs: nakamoto_txs_3.clone(), + }; + + // third nakamoto block, but with a higher signing weight + let nakamoto_header_3_weight_2 = NakamotoBlockHeader { + version: 1, + chain_length: 459, + burn_spent: 128, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: nakamoto_header_2.block_id(), + tx_merkle_root: nakamoto_tx_merkle_root_3, + state_index_root: TrieHash([0x07; 32]), + timestamp: 8, + miner_signature: MessageSignature::empty(), + signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nakamoto_header_info_3_weight_2 = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header_3_weight_2.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header_2.chain_length, + index_root: TrieHash([0x67; 32]), + consensus_hash: nakamoto_header_2.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + burn_view: Some(nakamoto_header_3.consensus_hash), + }; + + let nakamoto_block_3_weight_2 = NakamotoBlock { + header: nakamoto_header_3_weight_2.clone(), + txs: nakamoto_txs_3.clone(), + }; + + // fourth nakamoto block -- confirms nakamoto_block_3_weight_2 + let nakamoto_header_4 = NakamotoBlockHeader { + version: 1, + chain_length: 460, + burn_spent: 128, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: nakamoto_header_3_weight_2.block_id(), + tx_merkle_root: nakamoto_tx_merkle_root_4, + state_index_root: TrieHash([0x71; 32]), + timestamp: 10, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nakamoto_header_info_4 = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header_4.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header_4.chain_length, + index_root: TrieHash([0x71; 32]), + consensus_hash: 
nakamoto_header_3_weight_2.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + burn_view: Some(nakamoto_header_4.consensus_hash), }; + let nakamoto_block_4 = NakamotoBlock { + header: nakamoto_header_4.clone(), + txs: nakamoto_txs_4.clone(), + }; + + // nakamoto block 3 only differs in signers + assert_eq!( + nakamoto_block_3.block_id(), + nakamoto_block_3_weight_2.block_id() + ); + assert_eq!( + nakamoto_block_3.header.signer_signature_hash(), + nakamoto_block_3_weight_2.header.signer_signature_hash() + ); + let mut total_nakamoto_execution_cost = nakamoto_execution_cost.clone(); total_nakamoto_execution_cost .add(&nakamoto_execution_cost_2) .unwrap(); - let nakamoto_tenure = NakamotoTenure { + let nakamoto_tenure = NakamotoTenureEvent { tenure_id_consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), prev_tenure_id_consensus_hash: tenure_change_payload.prev_tenure_consensus_hash.clone(), burn_view_consensus_hash: tenure_change_payload.burn_view_consensus_hash.clone(), @@ -877,7 +962,6 @@ pub fn test_load_store_update_nakamoto_blocks() { block_hash: nakamoto_block.header.block_hash(), block_id: nakamoto_block.header.block_id(), coinbase_height: epoch2_header.total_work.work + 1, - tenure_index: 1, num_blocks_confirmed: 1, }; @@ -898,17 +982,14 @@ pub fn test_load_store_update_nakamoto_blocks() { // tenure length doesn't apply to epoch2 blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length(&tx, &epoch2_header_info.consensus_hash) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_length( + &tx, + &epoch2_header_info.index_block_hash() + ) + .unwrap(), 0 ); - // no tenure rows - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64).unwrap(), - None - ); - // but, this upcoming tenure-change payload should be the first-ever tenure-change payload! 
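+        // Illustrative note: this succeeds because the payload's
+        // prev_tenure_consensus_hash is now the epoch2 parent's consensus hash,
+        // which check_first_nakamoto_tenure_change verifies explicitly.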
assert!(NakamotoChainState::check_first_nakamoto_tenure_change( &tx, @@ -919,46 +1000,25 @@ pub fn test_load_store_update_nakamoto_blocks() { // no tenure yet, so zero blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) + .unwrap(), 0 ); - // no tenure rows - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64).unwrap(), - None - ); - // add the tenure for these blocks NakamotoChainState::insert_nakamoto_tenure( &tx, &nakamoto_header, epoch2_header.total_work.work + 1, - 1, &tenure_change_payload, ) .unwrap(); // no blocks yet, so zero blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), - 0 - ); - - // have a tenure - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) - .unwrap() + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) .unwrap(), - epoch2_header.total_work.work + 1 + 0 ); // this succeeds now @@ -970,27 +1030,24 @@ pub fn test_load_store_update_nakamoto_blocks() { &nakamoto_execution_cost, &nakamoto_execution_cost, true, + 1, 300, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block.clone(), false).unwrap(); + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); // tenure has one block assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), - 1 - ); - - // same tenure - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) - .unwrap() + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) .unwrap(), - epoch2_header.total_work.work + 1 + 1 ); // this succeeds now @@ -1002,32 +1059,94 @@ pub fn test_load_store_update_nakamoto_blocks() { &nakamoto_execution_cost, &total_nakamoto_execution_cost, false, + 2, 400, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block_2.clone(), false).unwrap(); + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_2, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); // tenure has two blocks assert_eq!( NakamotoChainState::get_nakamoto_tenure_length( &tx, - &nakamoto_block.header.consensus_hash + &nakamoto_block_2.header.block_id(), ) .unwrap(), 2 ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) + .unwrap(), + 1 + ); - // same tenure + // store, but do not process, a block + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3 + ); assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.block_hash(), + ) .unwrap() .unwrap(), - epoch2_header.total_work.work + 1 + (nakamoto_header_3.block_id(), false, false, 1) ); - // store, but do not process, a block - NakamotoChainState::store_block(&staging_tx, 
nakamoto_block_3.clone(), false).unwrap(); + // store, but do not process, the same block with a heavier weight + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3_weight_2, + false, + 2, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3_weight_2.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3_weight_2 + ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.block_hash(), + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), false, false, 2) + ); staging_tx.commit().unwrap(); tx.commit().unwrap(); @@ -1035,13 +1154,16 @@ pub fn test_load_store_update_nakamoto_blocks() { // can load Nakamoto block, but only the Nakamoto block let nakamoto_blocks_db = chainstate.nakamoto_blocks_db(); + let first_nakamoto_block = nakamoto_blocks_db + .get_nakamoto_block(&nakamoto_header.block_id()) + .unwrap() + .unwrap() + .0; + assert_eq!(first_nakamoto_block, nakamoto_block,); + // Double check that the signatures match assert_eq!( - nakamoto_blocks_db - .get_nakamoto_block(&nakamoto_header.block_id()) - .unwrap() - .unwrap() - .0, - nakamoto_block + first_nakamoto_block.header.signer_signature, + header_signatures ); assert_eq!( nakamoto_blocks_db @@ -1073,6 +1195,19 @@ pub fn test_load_store_update_nakamoto_blocks() { (true, false) ); + // however, in the staging DB, this block is not yet marked as processed + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash(), + ) + .unwrap() + .unwrap(), + (nakamoto_header.block_id(), false, false, 1) + ); + // same goes for block 2 assert_eq!( NakamotoChainState::get_nakamoto_block_status( @@ -1085,20 +1220,43 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(), (true, false) ); + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header_2.block_hash(), + ) + .unwrap() + .unwrap(), + (nakamoto_header_2.block_id(), false, false, 1) + ); // block 3 has only been stored, but no header has been added assert_eq!( NakamotoChainState::get_nakamoto_block_status( chainstate.nakamoto_blocks_db(), chainstate.db(), - &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.block_hash() + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), (false, false) ); + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), false, false, 2) + ); + // this method doesn't return data for epoch2 assert_eq!( NakamotoChainState::get_nakamoto_block_status( @@ -1111,41 +1269,224 @@ pub fn test_load_store_update_nakamoto_blocks() { None ); - // set nakamoto block processed + // set nakamoto block processed, and store a sibling if it's the chain tip + { + let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); + + staging_tx + .set_block_processed(&nakamoto_header_3_weight_2.block_id()) + .unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + staging_tx.conn(), + &tx, + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + 
(true, false)
+        );
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_block_processed_and_signed_weight(
+                    &nakamoto_header_3_weight_2.consensus_hash,
+                    &nakamoto_header_3_weight_2.block_hash()
+                )
+                .unwrap()
+                .unwrap(),
+            (nakamoto_header_3_weight_2.block_id(), true, false, 2)
+        );
+
+        // store a sibling with more weight, even though this block has been processed.
+        // This is allowed because we don't commit to signatures.
+        NakamotoChainState::store_block_if_better(
+            &staging_tx,
+            &nakamoto_block_3,
+            false,
+            3,
+            NakamotoBlockObtainMethod::Downloaded,
+        )
+        .unwrap();
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_nakamoto_block(&nakamoto_header_3.block_id())
+                .unwrap()
+                .unwrap()
+                .0,
+            nakamoto_block_3
+        );
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_block_processed_and_signed_weight(
+                    &nakamoto_header_3.consensus_hash,
+                    &nakamoto_header_3.block_hash()
+                )
+                .unwrap()
+                .unwrap(),
+            (nakamoto_header_3.block_id(), true, false, 3)
+        );
+    }
+
+    // set nakamoto block processed, and store a processed child, and verify that we'll still
+    // accept siblings with higher signing power.
     {
         let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap();
+
+        // set block 3 weight 2 processed
+        staging_tx
+            .set_block_processed(&nakamoto_header_3_weight_2.block_id())
+            .unwrap();
+        assert_eq!(
+            NakamotoChainState::get_nakamoto_block_status(
+                staging_tx.conn(),
+                &tx,
+                &nakamoto_header_3_weight_2.consensus_hash,
+                &nakamoto_header_3_weight_2.block_hash()
+            )
+            .unwrap()
+            .unwrap(),
+            (true, false)
+        );
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_block_processed_and_signed_weight(
+                    &nakamoto_header_3_weight_2.consensus_hash,
+                    &nakamoto_header_3_weight_2.block_hash()
+                )
+                .unwrap()
+                .unwrap(),
+            (nakamoto_header_3_weight_2.block_id(), true, false, 2)
+        );
+
+        // store block 4, which descends from block 3 weight 2
+        NakamotoChainState::store_block_if_better(
+            &staging_tx,
+            &nakamoto_block_4,
+            false,
+            1,
+            NakamotoBlockObtainMethod::Downloaded,
+        )
+        .unwrap();
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_nakamoto_block(&nakamoto_header_4.block_id())
+                .unwrap()
+                .unwrap()
+                .0,
+            nakamoto_block_4
+        );
+
+        // set block 4 processed
         staging_tx
-            .set_block_processed(&nakamoto_header_3.block_id())
+            .set_block_processed(&nakamoto_header_4.block_id())
             .unwrap();
         assert_eq!(
             NakamotoChainState::get_nakamoto_block_status(
                 staging_tx.conn(),
                 &tx,
-                &nakamoto_header_3.consensus_hash,
-                &nakamoto_header_3.block_hash()
+                &nakamoto_header_4.consensus_hash,
+                &nakamoto_header_4.block_hash()
             )
             .unwrap()
             .unwrap(),
             (true, false)
         );
+
+        NakamotoChainState::store_block_if_better(
+            &staging_tx,
+            &nakamoto_block_3,
+            false,
+            3,
+            NakamotoBlockObtainMethod::Downloaded,
+        )
+        .unwrap();
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_nakamoto_block(&nakamoto_header_3.block_id())
+                .unwrap()
+                .unwrap()
+                .0,
+            nakamoto_block_3
+        );
+    }

     // set nakamoto block orphaned
     {
         let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap();
         staging_tx
-            .set_block_orphaned(&nakamoto_header.block_id())
+            .set_block_orphaned(&nakamoto_header_3_weight_2.block_id())
             .unwrap();
         assert_eq!(
             NakamotoChainState::get_nakamoto_block_status(
                 staging_tx.conn(),
                 &tx,
-                &nakamoto_header.consensus_hash,
-                &nakamoto_header.block_hash()
+                &nakamoto_header_3_weight_2.consensus_hash,
+                &nakamoto_header_3_weight_2.block_hash()
             )
             .unwrap()
             .unwrap(),
             (true, true)
         );
+        assert_eq!(
+            staging_tx
+                .conn()
+                .get_block_processed_and_signed_weight(
+                    &nakamoto_header_3_weight_2.consensus_hash,
+ 
&nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, true, 2) + ); + + // can't re-store it, even if its signing power is better + assert!(!NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3_weight_2, + false, + 3, + NakamotoBlockObtainMethod::Downloaded + ) + .unwrap()); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + staging_tx.conn(), + &tx, + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + (true, true) + ); + + // can't store a sibling with the same sighash either, since if a block with the given sighash is orphaned, then + // it doesn't matter how many signers it has + assert!(!NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 3, + NakamotoBlockObtainMethod::Downloaded + ) + .unwrap()); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.block_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, true, 2) + ); } // orphan nakamoto block by parent { @@ -1164,48 +1505,19 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(), (false, true) ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header.block_id(), false, true, 1) + ); } - // check start/finish - assert_eq!( - NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info - ); - assert_eq!( - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info_2 - ); - - // can query the tenure-start and epoch2 headers by consensus hash - assert_eq!( - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info - ); - assert_eq!( - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &epoch2_consensus_hash - ) - .unwrap() - .unwrap(), - epoch2_header_info - ); - // can query the tenure-start and epoch2 headers by block ID assert_eq!( NakamotoChainState::get_block_header(chainstate.db(), &nakamoto_header.block_id()) @@ -1229,29 +1541,6 @@ pub fn test_load_store_update_nakamoto_blocks() { epoch2_header_info ); - // can get tenure height of nakamoto blocks and epoch2 blocks - assert_eq!( - NakamotoChainState::get_coinbase_height(chainstate.db(), &nakamoto_header.block_id()) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() + 1 - ); - assert_eq!( - NakamotoChainState::get_coinbase_height(chainstate.db(), &nakamoto_header_2.block_id()) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() + 1 - ); - assert_eq!( - NakamotoChainState::get_coinbase_height( - chainstate.db(), - &epoch2_header_info.index_block_hash() - ) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() - ); - // can get total tenure cost for nakamoto blocks, but not epoch2 blocks assert_eq!( NakamotoChainState::get_total_tenure_cost_at(chainstate.db(), &nakamoto_header.block_id()) @@ -1305,33 +1594,22 @@ pub fn test_load_store_update_nakamoto_blocks() { None ); - // can get block VRF proof for both nakamoto and epoch2 
blocks - assert_eq!( - NakamotoChainState::get_block_vrf_proof(chainstate.db(), &nakamoto_header.consensus_hash) - .unwrap() - .unwrap(), - nakamoto_proof - ); - assert_eq!( - NakamotoChainState::get_block_vrf_proof(chainstate.db(), &epoch2_consensus_hash) - .unwrap() - .unwrap(), - epoch2_proof - ); - // can get nakamoto VRF proof only for nakamoto blocks assert_eq!( NakamotoChainState::get_nakamoto_tenure_vrf_proof( chainstate.db(), - &nakamoto_header.consensus_hash + &nakamoto_header.block_id(), ) .unwrap() .unwrap(), nakamoto_proof ); assert_eq!( - NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate.db(), &epoch2_consensus_hash) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), None ); @@ -1340,16 +1618,8 @@ pub fn test_load_store_update_nakamoto_blocks() { { let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); let staging_conn = staging_tx.conn(); - let sh = MockSortitionHandle::new( - nakamoto_block_2.header.consensus_hash.clone(), - nakamoto_block_2.header.block_hash(), - nakamoto_block_2.header.chain_length, - ); - assert_eq!( - staging_conn.next_ready_nakamoto_block(&tx, &sh).unwrap(), - None - ); + assert_eq!(staging_conn.next_ready_nakamoto_block(&tx).unwrap(), None); // set parent epoch2 block processed staging_tx @@ -1357,10 +1627,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(); // but it's not enough -- child's consensus hash needs to be burn_processable - assert_eq!( - staging_conn.next_ready_nakamoto_block(&tx, &sh).unwrap(), - None - ); + assert_eq!(staging_conn.next_ready_nakamoto_block(&tx).unwrap(), None); // set burn processed staging_tx @@ -1370,7 +1637,7 @@ pub fn test_load_store_update_nakamoto_blocks() { // this works now assert_eq!( staging_conn - .next_ready_nakamoto_block(&tx, &sh) + .next_ready_nakamoto_block(&tx) .unwrap() .unwrap() .0, @@ -1385,7 +1652,7 @@ pub fn test_load_store_update_nakamoto_blocks() { // next nakamoto block assert_eq!( staging_conn - .next_ready_nakamoto_block(&tx, &sh) + .next_ready_nakamoto_block(&tx) .unwrap() .unwrap() .0, @@ -1517,9 +1784,10 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1536,9 +1804,10 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1555,9 +1824,10 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; 
nakamoto_header_bad_miner_sig .sign_miner(&private_key) @@ -1604,226 +1874,12 @@ fn test_nakamoto_block_static_verification() { .is_err()); } -/// Mock block arrivals -fn make_fork_run_with_arrivals( - sort_db: &mut SortitionDB, - start_snapshot: &BlockSnapshot, - length: u64, - bit_pattern: u8, -) -> Vec { - let mut last_snapshot = start_snapshot.clone(); - let mut new_snapshots = vec![]; - for i in last_snapshot.block_height..(last_snapshot.block_height + length) { - let snapshot = BlockSnapshot { - accumulated_coinbase_ustx: 0, - pox_valid: true, - block_height: last_snapshot.block_height + 1, - burn_header_timestamp: get_epoch_time_secs(), - burn_header_hash: BurnchainHeaderHash([(i as u8) | bit_pattern; 32]), - sortition_id: SortitionId([(i as u8) | bit_pattern; 32]), - parent_sortition_id: last_snapshot.sortition_id.clone(), - parent_burn_header_hash: last_snapshot.burn_header_hash.clone(), - consensus_hash: ConsensusHash([((i + 1) as u8) | bit_pattern; 20]), - ops_hash: OpsHash([(i as u8) | bit_pattern; 32]), - total_burn: 0, - sortition: true, - sortition_hash: SortitionHash([(i as u8) | bit_pattern; 32]), - winning_block_txid: Txid([(i as u8) | bit_pattern; 32]), - winning_stacks_block_hash: BlockHeaderHash([(i as u8) | bit_pattern; 32]), - index_root: TrieHash([0u8; 32]), - num_sortitions: last_snapshot.num_sortitions + 1, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: last_snapshot.canonical_stacks_tip_height + 10, - canonical_stacks_tip_hash: BlockHeaderHash([((i + 1) as u8) | bit_pattern; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([((i + 1) as u8) | bit_pattern; 20]), - miner_pk_hash: None, - }; - new_snapshots.push(snapshot.clone()); - { - let mut tx = SortitionHandleTx::begin(sort_db, &last_snapshot.sortition_id).unwrap(); - let _index_root = tx - .append_chain_tip_snapshot( - &last_snapshot, - &snapshot, - &vec![], - &vec![], - None, - None, - None, - ) - .unwrap(); - tx.test_update_canonical_stacks_tip( - &snapshot.sortition_id, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.canonical_stacks_tip_hash, - snapshot.canonical_stacks_tip_height, - ) - .unwrap(); - tx.commit().unwrap(); - } - last_snapshot = SortitionDB::get_block_snapshot(sort_db.conn(), &snapshot.sortition_id) - .unwrap() - .unwrap(); - } - new_snapshots -} - -/// Tests that getting the highest nakamoto tenure works in the presence of forks -#[test] -pub fn test_get_highest_nakamoto_tenure() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); - let mut peer = boot_nakamoto( - function_name!(), - vec![], - &mut test_signers, - &test_stackers, - None, - ); - - // extract chainstate and sortdb -- we don't need the peer anymore - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); - - // seed a single fork of tenures - let last_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - - // mock block arrivals - let snapshots = make_fork_run_with_arrivals(sort_db, &last_snapshot, 5, 0); - - let mut last_header: Option = None; - let mut last_tenure_change: Option = None; - let mut all_headers = vec![]; - let mut all_tenure_changes = vec![]; - for (i, sn) in snapshots.iter().enumerate() { - let block_header = NakamotoBlockHeader { - version: 0, - chain_length: sn.canonical_stacks_tip_height, - burn_spent: i as u64, - consensus_hash: sn.consensus_hash.clone(), - 
parent_block_id: last_header - .as_ref() - .map(|hdr| hdr.block_id()) - .unwrap_or(FIRST_STACKS_BLOCK_ID.clone()), - tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), - state_index_root: TrieHash([0x00; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - let tenure_change = TenureChangePayload { - tenure_consensus_hash: sn.consensus_hash.clone(), - prev_tenure_consensus_hash: last_tenure_change - .as_ref() - .map(|tc| tc.tenure_consensus_hash.clone()) - .unwrap_or(last_snapshot.consensus_hash.clone()), - burn_view_consensus_hash: sn.consensus_hash.clone(), - previous_tenure_end: block_header.block_id(), - previous_tenure_blocks: 10, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x00; 20]), - }; - - let tx = chainstate.db_tx_begin().unwrap(); - NakamotoChainState::insert_nakamoto_tenure( - &tx, - &block_header, - 1 + i as u64, - 1 + i as u64, - &tenure_change, - ) - .unwrap(); - tx.commit().unwrap(); - - all_headers.push(block_header.clone()); - all_tenure_changes.push(tenure_change.clone()); - - last_header = Some(block_header); - last_tenure_change = Some(tenure_change); - } - - // highest tenure should be the last one we inserted - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let (stacks_ch, stacks_bhh, stacks_height) = - SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sort_db.conn()).unwrap(); - debug!("tip = {:?}", &tip); - debug!( - "stacks tip = {},{},{}", - &stacks_ch, &stacks_bhh, stacks_height - ); - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) - .unwrap() - .unwrap(); - - let last_tenure_change = last_tenure_change.unwrap(); - let last_header = last_header.unwrap(); - assert_eq!( - highest_tenure.tenure_id_consensus_hash, - last_tenure_change.tenure_consensus_hash - ); - assert_eq!( - highest_tenure.prev_tenure_id_consensus_hash, - last_tenure_change.prev_tenure_consensus_hash - ); - assert_eq!( - highest_tenure.burn_view_consensus_hash, - last_tenure_change.burn_view_consensus_hash - ); - assert_eq!(highest_tenure.cause, last_tenure_change.cause); - assert_eq!(highest_tenure.block_hash, last_header.block_hash()); - assert_eq!(highest_tenure.block_id, last_header.block_id()); - assert_eq!(highest_tenure.coinbase_height, 5); - assert_eq!(highest_tenure.tenure_index, 5); - assert_eq!(highest_tenure.num_blocks_confirmed, 10); - - // uh oh, a bitcoin fork! 
- let last_snapshot = snapshots[2].clone(); - let snapshots = make_fork_run(sort_db, &last_snapshot, 7, 0x80); - - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - debug!("tip = {:?}", &new_tip); - debug!( - "stacks tip = {},{},{}", - &stacks_ch, &stacks_bhh, stacks_height - ); - - // new tip doesn't include the last two tenures - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) - .unwrap() - .unwrap(); - let last_tenure_change = &all_tenure_changes[2]; - let last_header = &all_headers[2]; - assert_eq!( - highest_tenure.tenure_id_consensus_hash, - last_tenure_change.tenure_consensus_hash - ); - assert_eq!( - highest_tenure.prev_tenure_id_consensus_hash, - last_tenure_change.prev_tenure_consensus_hash - ); - assert_eq!( - highest_tenure.burn_view_consensus_hash, - last_tenure_change.burn_view_consensus_hash - ); - assert_eq!(highest_tenure.cause, last_tenure_change.cause); - assert_eq!(highest_tenure.block_hash, last_header.block_hash()); - assert_eq!(highest_tenure.block_id, last_header.block_id()); - assert_eq!(highest_tenure.coinbase_height, 3); - assert_eq!(highest_tenure.tenure_index, 3); - assert_eq!(highest_tenure.num_blocks_confirmed, 10); -} - /// Test that we can generate a .miners stackerdb config. /// The config must be stable across sortitions -- if a miner is given slot i, then it continues /// to have slot i in subsequent sortitions. #[test] fn test_make_miners_stackerdb_config() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -1945,6 +2001,7 @@ fn test_make_miners_stackerdb_config() { block_height: snapshot.block_height, burn_parent_modulus: ((snapshot.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: snapshot.burn_header_hash.clone(), + treatment: vec![], }; let winning_ops = if i == 0 { @@ -1992,8 +2049,9 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); // check the stackerdb config as of this chain tip - let stackerdb_config = - NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip).unwrap(); + let stackerdb_config = NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip) + .unwrap() + .0; eprintln!( "stackerdb_config at i = {} (sorition? 
{}): {:?}", &i, sortition, &stackerdb_config @@ -2010,40 +2068,36 @@ fn test_make_miners_stackerdb_config() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { header, txs: vec![], }; let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let miner_privkey = &miner_keys[i]; + let miner_pubkey = StacksPublicKey::from_private(miner_privkey); + let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &tip.consensus_hash) + .expect("Failed to get miner slot"); if sortition { - let chunk = NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &block, - &miner_keys[i], - &miners_contract_id, - ) - .unwrap() - .unwrap(); + let slot_id = slot_id.expect("No miner slot exists for this miner").start; + let slot_version = stackerdbs + .get_slot_version(&miners_contract_id, slot_id) + .expect("Failed to get slot version") + .unwrap_or(0) + .saturating_add(1); + let block_bytes = block.serialize_to_vec(); + let mut chunk = StackerDBChunkData::new(slot_id, slot_version, block_bytes); + chunk.sign(&miner_keys[i]).expect("Failed to sign chunk"); assert_eq!(chunk.slot_version, 1); assert_eq!(chunk.data, block.serialize_to_vec()); stackerdb_chunks.push(chunk); } else { - assert!(NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &block, - &miner_keys[i], - &miners_contract_id, - ) - .unwrap() - .is_none()); + // We are not a miner anymore and should not have any slot + assert!(slot_id.is_none()); } } // miners are "stable" across snapshots @@ -2843,3 +2897,390 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { assert_eq!(filtered_txs.len(), 1); assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); } + +pub mod nakamoto_block_signatures { + use super::*; + + /// Helper function make a reward set with (PrivateKey, weight) tuples + fn make_reward_set(signers: Vec<(Secp256k1PrivateKey, u32)>) -> RewardSet { + let mut reward_set = RewardSet::empty(); + reward_set.signers = Some( + signers + .iter() + .map(|(s, w)| { + let mut signing_key = [0u8; 33]; + signing_key.copy_from_slice( + &Secp256k1PublicKey::from_private(s) + .to_bytes_compressed() + .as_slice(), + ); + NakamotoSignerEntry { + signing_key, + stacked_amt: 100_u128, + weight: *w, + } + }) + .collect(), + ); + reward_set + } + + #[test] + // Test that signatures succeed with exactly 70% of the votes + pub fn test_exactly_enough_votes() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 30), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + /// Test that signatures fail with just under 70% of the votes + pub fn test_just_not_enough_votes() { + 
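+        // Total signer weight is 10_000, so the 70% threshold is 7_000; the two
+        // signers that sign below contribute 3_500 + 3_499 = 6_999, one unit short.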
let signers = vec![ + (Secp256k1PrivateKey::default(), 3500), + (Secp256k1PrivateKey::default(), 3499), + (Secp256k1PrivateKey::default(), 3001), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + /// Base success case - 3 signers of equal weight, all signing the block + pub fn test_nakamoto_block_verify_signatures() { + let signers = vec![ + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + ]; + + let reward_set = make_reward_set(signers.iter().map(|s| (s.clone(), 100)).collect()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block sighash for each signer + + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .map(|s| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + // assert!(&header.verify_signer_signatures(&reward_set).is_ok()); + } + + #[test] + /// Fully signed block, but not in order + fn test_out_of_order_signer_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block for each signer, but in reverse + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .rev() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected out of order signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test with 3 equal signers, and only two sign + fn test_insufficient_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + 
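+    // A minimal illustrative sketch (a hypothetical helper, not called by any
+    // test) of the rule the cases in this module exercise: the required signing
+    // weight is 70% of the total signer weight, rounded up. It agrees with
+    // `NakamotoBlockHeader::compute_voting_weight_threshold` on every value
+    // checked in `test_compute_voting_weight_threshold` at the end of this
+    // module (e.g. 100 -> 70, 511 -> 358).
+    #[allow(dead_code)]
+    fn threshold_sketch(total_weight: u32) -> u32 {
+        // 70% of the total, rounded up
+        ((u64::from(total_weight) * 7 + 9) / 10) as u32
+    }
+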
#[test] + // Test with 4 signers, but one has 75% weight. Only the whale signs + // and the block is valid + fn test_single_signature_threshold() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 75), + (Secp256k1PrivateKey::default(), 10), + (Secp256k1PrivateKey::default(), 5), + (Secp256k1PrivateKey::default(), 10), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the whale + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(1) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + // Test with a signature that didn't come from the signer set + fn test_invalid_signer() { + let signers = vec![(Secp256k1PrivateKey::default(), 100)]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // Sign with all signers + let mut signer_signature = signers + .iter() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + let invalid_signature = Secp256k1PrivateKey::default() + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(invalid_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("not found in the reward set")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + fn test_duplicate_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // First, sign with the first 2 signers + let mut signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Sign again with the first signer + let duplicate_signature = signers[0] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(duplicate_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected duplicate signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Signatures are out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature used a different message + fn test_signature_invalid_message() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // With the 4th signer, use a junk message + let message = 
[0u8; 32]; + + let bad_signature = signers[3] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(bad_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => {} + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature is not recoverable + fn test_unrecoverable_signature() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Now append an unrecoverable signature + signer_signature.push(MessageSignature::empty()); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + if !msg.contains("Unable to recover public key") { + panic!("Unexpected error msg: {}", msg); + } + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + pub fn test_compute_voting_weight_threshold() { + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(100_u32).unwrap(), + 70_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(10_u32).unwrap(), + 7_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(3000_u32).unwrap(), + 2100_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(4000_u32).unwrap(), + 2800_u32, + ); + + // Round-up check + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(511_u32).unwrap(), + 358_u32, + ); + } +} diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 570b0cc3d3d..d3f190de1fe 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -28,8 +28,11 @@ use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, +}; use stacks_common::util::hash::Hash160; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use wsts::curve::point::Point; @@ -43,11 +46,15 @@ use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::burn::*; +use crate::chainstate::coordinator::tests::NullEventDispatcher; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::coordinator::{ + get_nakamoto_next_recipients, load_nakamoto_reward_set, +}; use 
crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -70,9 +77,16 @@ use crate::util_lib::db::Error as db_error; #[derive(Debug, Clone)] pub struct TestStacker { + /// Key used to send stacking transactions pub stacker_private_key: StacksPrivateKey, + /// Signer key for this stacker pub signer_private_key: StacksPrivateKey, + /// amount of uSTX stacked pub amount: u128, + /// PoX address to stack to (defaults to a fixed PoX address if not given) + pub pox_addr: Option, + /// Maximum amount to stack (defaults to u128::MAX) + pub max_amount: Option, } impl TestStacker { @@ -86,6 +100,8 @@ impl TestStacker { stacker_private_key, signer_private_key, amount: 1_000_000_000_000_000_000, + pox_addr: None, + max_amount: None, } } @@ -95,17 +111,65 @@ impl TestStacker { /// make a set of stackers who will share a single signing key and stack with /// `Self::DEFAULT_STACKER_AMOUNT` - pub fn common_signing_set(test_signers: &TestSigners) -> Vec { - let mut signing_key_seed = test_signers.num_keys.to_be_bytes().to_vec(); + pub fn common_signing_set() -> (TestSigners, Vec) { + let num_keys: u32 = 10; + let mut signing_key_seed = num_keys.to_be_bytes().to_vec(); signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); - (0..test_signers.num_keys) + let stackers = (0..num_keys) .map(|index| TestStacker { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: Self::DEFAULT_STACKER_AMOUNT, + pox_addr: None, + max_amount: None, }) - .collect() + .collect::>(); + + let test_signers = TestSigners::new(vec![signing_key]); + (test_signers, stackers) + } + + /// make a set of stackers who will share a set of keys and stack with + /// `Self::DEFAULT_STACKER_AMOUNT` + /// + /// `key_distribution.len()` stackers will be created + /// `key_distribution[i]` is the ID of key that the ith stacker will use. + /// The ID is opaque -- it's used as a seed to generate the key. + /// Each set of stackers with the same key ID will be given its own PoX address + pub fn multi_signing_set(key_distribution: &[u8]) -> (TestSigners, Vec) { + let stackers = key_distribution + .iter() + .enumerate() + .map(|(index, key_seed)| { + let signing_key = StacksPrivateKey::from_seed(&[*key_seed]); + let pox_key = StacksPrivateKey::from_seed(&[*key_seed, *key_seed]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&pox_key)); + let pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + + TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: Self::DEFAULT_STACKER_AMOUNT, + pox_addr: Some(pox_addr), + max_amount: Some(u128::MAX - u128::try_from(index).unwrap()), + } + }) + .collect::>(); + + // N.B. 
the .to_hex() is needed because Secp256k1PrivateKey does not implement Hash + let unique_signers: HashSet<_> = stackers + .iter() + .map(|st| st.signer_private_key.to_hex()) + .collect(); + let test_signers = TestSigners::new( + unique_signers + .into_iter() + .map(|sk_hex| Secp256k1PrivateKey::from_hex(&sk_hex).unwrap()) + .collect(), + ); + (test_signers, stackers) } } @@ -272,10 +336,9 @@ impl TestStacksNode { parent_block_snapshot_opt: Option<&BlockSnapshot>, expect_success: bool, ) -> LeaderBlockCommitOp { - test_debug!( + info!( "Miner {}: Commit to Nakamoto tenure starting at {}", - miner.id, - &last_tenure_id, + miner.id, &last_tenure_id, ); let parent_block = @@ -283,7 +346,8 @@ impl TestStacksNode { .unwrap() .unwrap(); let vrf_proof = NakamotoChainState::get_block_vrf_proof( - self.chainstate.db(), + &mut self.chainstate.index_conn(), + &parent_block.index_block_hash(), &parent_block.consensus_hash, ) .unwrap() @@ -333,6 +397,17 @@ impl TestStacksNode { /// Record the nakamoto blocks as a new tenure pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec) { + if let Some(last_tenure) = self.nakamoto_blocks.last_mut() { + if tenure_blocks.len() > 0 { + // this tenure is overwriting the last tenure + if last_tenure.first().unwrap().header.consensus_hash + == tenure_blocks.first().unwrap().header.consensus_hash + { + *last_tenure = tenure_blocks; + return; + } + } + } self.nakamoto_blocks.push(tenure_blocks); } @@ -380,11 +455,12 @@ impl TestStacksNode { .unwrap(); test_debug!( - "Work in {} {} for Nakamoto parent: {},{}", + "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_sortition.total_burn, last_parent.header.chain_length + 1, + &parent_tenure_id, ); (parent_tenure_id, parent_sortition) @@ -414,11 +490,12 @@ impl TestStacksNode { let parent_tenure_id = parent_chain_tip.index_block_hash(); test_debug!( - "Work in {} {} for Stacks 2.x parent: {},{}", + "Work in {} {} for Stacks 2.x parent: {},{}. Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_stacks_block_snapshot.total_burn, parent_chain_tip.anchored_header.height(), + &parent_tenure_id, ); (parent_tenure_id, parent_stacks_block_snapshot) @@ -436,13 +513,22 @@ impl TestStacksNode { // building atop nakamoto let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( self.chainstate.db(), - &hdr.consensus_hash, + &hdr.index_block_hash(), ) .unwrap(); - debug!("Tenure length of {} is {}", &hdr.consensus_hash, tenure_len); + debug!( + "Tenure length of Nakamoto tenure {} is {}; tipped at {}", + &hdr.consensus_hash, + tenure_len, + &hdr.index_block_hash() + ); (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) } else { // building atop epoch2 + debug!( + "Tenure length of epoch2 tenure {} is {}; tipped at {}", + &parent_block_snapshot.consensus_hash, 1, &last_tenure_id + ); ( last_tenure_id, parent_block_snapshot.consensus_hash.clone(), @@ -461,6 +547,8 @@ impl TestStacksNode { pubkey_hash: miner.nakamoto_miner_hash160(), }; + test_debug!("TenureChangePayload: {:?}", &tenure_change_payload); + let block_commit_op = self.make_nakamoto_tenure_commitment( sortdb, burn_block, @@ -476,11 +564,22 @@ impl TestStacksNode { } /// Construct or extend a full Nakamoto tenure with the given block builder. + /// After block assembly, invoke `after_block` before signing and then processing. 
+ /// If `after_block` returns false, do not attempt to process the block, instead just + /// add it to the result Vec and exit the block building loop (the block builder cannot + /// build any subsequent blocks without processing the prior block) + /// /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. - pub fn make_nakamoto_tenure_blocks<'a, F>( + /// + /// Returns a list of + /// * the block + /// * its size + /// * its execution cost + /// * a list of malleablized blocks with the same sighash + pub fn make_nakamoto_tenure_blocks<'a, S, F, G>( chainstate: &mut StacksChainState, - sortdb: &SortitionDB, + sortdb: &mut SortitionDB, miner: &mut TestMiner, signers: &mut TestSigners, tenure_id_consensus_hash: &ConsensusHash, @@ -495,17 +594,22 @@ impl TestStacksNode { (), BitcoinIndexer, >, + mut miner_setup: S, mut block_builder: F, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + mut after_block: G, + ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec)> where + S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( &mut TestMiner, &mut StacksChainState, &SortitionDB, &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, + G: FnMut(&mut NakamotoBlock) -> bool, { let mut blocks = vec![]; + let mut all_malleablized_blocks = vec![]; let mut block_count = 0; loop { let mut txs = vec![]; @@ -522,17 +626,65 @@ impl TestStacksNode { break; } - let parent_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); + // there may be a tenure-extend here. Go find it if so + let mut parent_id_opt = None; + for tx in txs.iter() { + if let TransactionPayload::TenureChange(payload) = &tx.payload { + parent_id_opt = Some(payload.previous_tenure_end.clone()); + } + } + + let parent_tip_opt = if let Some(parent_id) = parent_id_opt { + if let Some(nakamoto_parent) = + NakamotoChainState::get_block_header(chainstate.db(), &parent_id).unwrap() + { + debug!( + "Use parent tip identified by produced TenureChange ({})", + &parent_id + ); + Some(nakamoto_parent) + } else { + warn!("Produced Tenure change transaction does not point to a real block"); + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + } + } else if let Some(tenure_change) = tenure_change.as_ref() { + // make sure parent tip is consistent with a tenure change + if let TransactionPayload::TenureChange(payload) = &tenure_change.payload { + if let Some(nakamoto_parent) = NakamotoChainState::get_block_header( + chainstate.db(), + &payload.previous_tenure_end, + ) + .unwrap() + { + debug!( + "Use parent tip identified by given TenureChange ({})", + &payload.previous_tenure_end + ); + Some(nakamoto_parent) + } else { + debug!("Use parent tip identified by canonical tip pointer (no parent block {})", &payload.previous_tenure_end); + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + } + } else { + panic!("Tenure change transaction does not have a TenureChange payload"); + } + } else { + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap() + }; + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); debug!( - "Build Nakamoto block in tenure {} sortition {}", - &tenure_id_consensus_hash, &burn_tip.consensus_hash + "Build Nakamoto block in tenure {} sortition {} parent_tip {:?}", + &tenure_id_consensus_hash, + &burn_tip.consensus_hash, + &parent_tip_opt.clone().map(|blk| blk.index_block_hash()) ); // make a block - let 
builder = if let Some(parent_tip) = parent_tip_opt { + let mut builder = if let Some(parent_tip) = parent_tip_opt { NakamotoBlockBuilder::new( &parent_tip, tenure_id_consensus_hash, @@ -547,6 +699,7 @@ impl TestStacksNode { } else { None }, + 1, ) .unwrap() } else { @@ -555,13 +708,19 @@ impl TestStacksNode { &coinbase.clone().unwrap(), ) }; + miner_setup(&mut builder); tenure_change = None; coinbase = None; - let (mut nakamoto_block, size, cost) = - Self::make_nakamoto_block_from_txs(builder, chainstate, &sortdb.index_conn(), txs) - .unwrap(); + let (mut nakamoto_block, size, cost) = Self::make_nakamoto_block_from_txs( + builder, + chainstate, + &sortdb.index_handle_at_tip(), + txs, + ) + .unwrap(); + let try_to_process = after_block(&mut nakamoto_block); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = @@ -573,19 +732,43 @@ impl TestStacksNode { .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) .unwrap(); + // Get the reward set + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let reward_set = load_nakamoto_reward_set( + miner + .burnchain + .pox_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), + &sort_tip_sn.sortition_id, + &miner.burnchain, + chainstate, + &nakamoto_block.header.parent_block_id, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); + test_debug!( "Signing Nakamoto block {} in tenure {} with key in cycle {}", nakamoto_block.block_id(), tenure_id_consensus_hash, cycle ); - signers.sign_nakamoto_block(&mut nakamoto_block, cycle); + + signers.sign_block_with_reward_set(&mut nakamoto_block, &reward_set); let block_id = nakamoto_block.block_id(); - debug!( - "Process Nakamoto block {} ({:?}", - &block_id, &nakamoto_block.header - ); + + if try_to_process { + debug!( + "Process Nakamoto block {} ({:?}", + &block_id, &nakamoto_block.header + ); + } debug!( "Nakamoto block {} txs: {:?}", &block_id, &nakamoto_block.txs @@ -593,51 +776,123 @@ impl TestStacksNode { let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); - info!("Processing the new nakamoto block"); - let accepted = match Relayer::process_new_nakamoto_block( - sortdb, - &mut sort_handle, - chainstate, - nakamoto_block.clone(), - None, - ) { - Ok(accepted) => accepted, - Err(e) => { - error!( - "Failed to process nakamoto block: {:?}\n{:?}", - &e, &nakamoto_block + let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); + + let mut block_to_store = nakamoto_block.clone(); + let mut processed_blocks = vec![]; + let mut malleablized_blocks = vec![]; + loop { + // don't process if we don't have enough signatures + if let Err(e) = block_to_store.header.verify_signer_signatures(&reward_set) { + info!( + "Will stop processing malleablized blocks for {}: {:?}", + &block_id, &e ); - panic!(); + break; + } + if block_to_store.block_id() == block_id { + info!("Processing the new nakamoto block {}", &block_id); + } else { + info!( + "Processing the new malleablized nakamoto block {}, original is {}", + &block_to_store.block_id(), + &block_id + ); + malleablized_blocks.push(block_to_store.clone()); } - }; - if accepted { - test_debug!("Accepted Nakamoto block {}", &block_id); - coord.handle_new_nakamoto_stacks_block().unwrap(); - // confirm that the chain tip 
advanced - let stacks_chain_tip = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + let accepted = if try_to_process { + match Relayer::process_new_nakamoto_block( + &miner.burnchain, + sortdb, + &mut sort_handle, + chainstate, + &stacks_tip, + &block_to_store, + None, + NakamotoBlockObtainMethod::Pushed, + ) { + Ok(accepted) => accepted, + Err(e) => { + error!( + "Failed to process nakamoto block: {:?}\n{:?}", + &e, &nakamoto_block + ); + panic!(); + } + } + } else { + false + }; + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); + coord.handle_new_nakamoto_stacks_block().unwrap(); + processed_blocks.push(block_to_store.clone()); + + if block_to_store.block_id() == block_id { + // confirm that the chain tip advanced + let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + &sortdb, + ) .unwrap() .unwrap(); - let nakamoto_chain_tip = stacks_chain_tip - .anchored_header - .as_stacks_nakamoto() - .expect("FATAL: chain tip is not a Nakamoto block"); - assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); - } else { - test_debug!("Did NOT accept Nakamoto block {}", &block_id); + let nakamoto_chain_tip = stacks_chain_tip + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: chain tip is not a Nakamoto block"); + assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); + } + } else { + if try_to_process { + test_debug!( + "Did NOT accept Nakamoto block {}", + &block_to_store.block_id() + ); + break; + } else { + test_debug!( + "Test will NOT process Nakamoto block {}", + &block_to_store.block_id() + ); + } + } + + let num_sigs = block_to_store.header.signer_signature.len(); + + // force this block to have a different sighash, in addition to different + // signatures, so that both blocks are valid at a consensus level + block_to_store.header.version += 1; + block_to_store.header.signer_signature.clear(); + + miner.sign_nakamoto_block(&mut block_to_store); + signers.sign_block_with_reward_set(&mut block_to_store, &reward_set); + + while block_to_store.header.signer_signature.len() >= num_sigs { + block_to_store.header.signer_signature.pop(); + } } + for processed_block in processed_blocks { + debug!("Begin check Nakamoto block {}", &processed_block.block_id()); + TestPeer::check_processed_nakamoto_block(sortdb, chainstate, &processed_block); + debug!("End check Nakamoto block {}", &processed_block.block_id()); + } blocks.push((nakamoto_block, size, cost)); + all_malleablized_blocks.push(malleablized_blocks); block_count += 1; } blocks + .into_iter() + .zip(all_malleablized_blocks.into_iter()) + .map(|((blk, sz, cost), mals)| (blk, sz, cost, mals)) + .collect() } pub fn make_nakamoto_block_from_txs( mut builder: NakamotoBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), ChainstateError> { use clarity::vm::ast::ASTRules; @@ -898,7 +1153,13 @@ impl<'a> TestPeer<'a> { } // patch in reward set info - match get_nakamoto_next_recipients(&tip, &mut sortdb, &self.config.burnchain) { + match get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &tenure_change_payload.previous_tenure_end, + &self.config.burnchain, + ) { Ok(recipients) => { block_commit_op.commit_outs = match recipients { Some(info) => { @@ -973,6 +1234,40 @@ impl<'a> TestPeer<'a> { proof } + pub fn try_process_block(&mut self, block: &NakamotoBlock) -> Result { + let mut 
sort_handle = self.sortdb.as_ref().unwrap().index_handle_at_tip(); + let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); + let accepted = Relayer::process_new_nakamoto_block( + &self.config.burnchain, + self.sortdb.as_ref().unwrap(), + &mut sort_handle, + &mut self.stacks_node.as_mut().unwrap().chainstate, + &stacks_tip, + block, + None, + NakamotoBlockObtainMethod::Pushed, + )?; + if !accepted { + return Ok(false); + } + let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); + let Some(block_receipt) = + NakamotoChainState::process_next_nakamoto_block::( + &mut self.stacks_node.as_mut().unwrap().chainstate, + self.sortdb.as_mut().unwrap(), + &sort_tip, + None, + )? + else { + return Ok(false); + }; + if block_receipt.header.index_block_hash() == block.block_id() { + Ok(true) + } else { + Ok(false) + } + } + /// Produce and process a Nakamoto tenure, after processing the block-commit from /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), /// take the consensus hash, and feed it in here. @@ -992,17 +1287,51 @@ impl<'a> TestPeer<'a> { &SortitionDB, &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, + { + self.make_nakamoto_tenure_and( + tenure_change, + coinbase, + signers, + |_| {}, + block_builder, + |_| true, + ) + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure_and( + &mut self, + tenure_change: StacksTransaction, + coinbase: StacksTransaction, + signers: &mut TestSigners, + miner_setup: S, + block_builder: F, + after_block: G, + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + S: FnMut(&mut NakamotoBlockBuilder), + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + &[(NakamotoBlock, u64, ExecutionCost)], + ) -> Vec, + G: FnMut(&mut NakamotoBlock) -> bool, { let cycle = self.get_reward_cycle(); let mut stacks_node = self.stacks_node.take().unwrap(); - let sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.sortdb.take().unwrap(); // Ensure the signers are setup for the current cycle signers.generate_aggregate_key(cycle); let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, - &sortdb, + &mut sortdb, &mut self.miner, signers, &tenure_change @@ -1013,20 +1342,38 @@ impl<'a> TestPeer<'a> { Some(tenure_change), Some(coinbase), &mut self.coord, + miner_setup, block_builder, + after_block, ); let just_blocks = blocks .clone() .into_iter() - .map(|(block, _, _)| block) + .map(|(block, _, _, _)| block) .collect(); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .map(|(_, _, _, malleablized)| malleablized) + .flatten() + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .clone() + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + self.stacks_node = Some(stacks_node); self.sortdb = Some(sortdb); - blocks + block_data } /// Produce and process a Nakamoto tenure extension. 
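+    // A hedged usage sketch of the three hooks taken by `make_nakamoto_tenure_and`
+    // above (illustrative only; `peer`, `signers`, `tenure_change`, and `coinbase`
+    // are assumed to be set up as in the surrounding tenure tests):
+    //
+    //   let blocks = peer.make_nakamoto_tenure_and(
+    //       tenure_change,
+    //       coinbase,
+    //       &mut signers,
+    //       |builder| { /* miner_setup: adjust the builder before assembly */ },
+    //       |_miner, _chainstate, _sortdb, _blocks_so_far| {
+    //           // block_builder: txs for the next block; an empty Vec ends the tenure
+    //           vec![]
+    //       },
+    //       |_block| true, // after_block: return false to store the block unprocessed
+    //   );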
@@ -1049,7 +1396,7 @@ impl<'a> TestPeer<'a> {
         ) -> Vec<StacksTransaction>,
     {
         let mut stacks_node = self.stacks_node.take().unwrap();
-        let sortdb = self.sortdb.take().unwrap();
+        let mut sortdb = self.sortdb.take().unwrap();

         let tenure_extend_payload =
             if let TransactionPayload::TenureChange(ref tc) = &tenure_extend_tx.payload {
@@ -1074,7 +1421,7 @@ impl<'a> TestPeer<'a> {

         let blocks = TestStacksNode::make_nakamoto_tenure_blocks(
             &mut stacks_node.chainstate,
-            &sortdb,
+            &mut sortdb,
             &mut self.miner,
             signers,
             &tenure_extend_tx
@@ -1085,47 +1432,72 @@ impl<'a> TestPeer<'a> {
             Some(tenure_extend_tx),
             None,
             &mut self.coord,
+            |_| {},
             block_builder,
+            |_| true,
         );

         let just_blocks = blocks
             .clone()
             .into_iter()
-            .map(|(block, _, _)| block)
+            .map(|(block, _, _, _)| block)
             .collect();
+
         stacks_node.add_nakamoto_extended_blocks(just_blocks);

+        let mut malleablized_blocks: Vec<NakamotoBlock> = blocks
+            .clone()
+            .into_iter()
+            .map(|(_, _, _, malleablized)| malleablized)
+            .flatten()
+            .collect();
+
+        self.malleablized_blocks.append(&mut malleablized_blocks);
+
+        let block_data = blocks
+            .clone()
+            .into_iter()
+            .map(|(blk, sz, cost, _)| (blk, sz, cost))
+            .collect();
+
         self.stacks_node = Some(stacks_node);
         self.sortdb = Some(sortdb);

-        blocks
+        block_data
     }

     /// Accept a new Nakamoto tenure via the relayer, and then try to process them.
     pub fn process_nakamoto_tenure(&mut self, blocks: Vec<NakamotoBlock>) {
         debug!("Peer will process {} Nakamoto blocks", blocks.len());

-        let sortdb = self.sortdb.take().unwrap();
+        let mut sortdb = self.sortdb.take().unwrap();
         let mut node = self.stacks_node.take().unwrap();
         let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap();
-        let mut sort_handle = sortdb.index_handle(&tip);

         node.add_nakamoto_tenure_blocks(blocks.clone());
         for block in blocks.into_iter() {
+            let mut sort_handle = sortdb.index_handle(&tip);
             let block_id = block.block_id();
             debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header);
             let accepted = Relayer::process_new_nakamoto_block(
+                &self.network.burnchain,
                 &sortdb,
                 &mut sort_handle,
                 &mut node.chainstate,
-                block,
+                &self.network.stacks_tip.block_id(),
+                &block,
                 None,
+                NakamotoBlockObtainMethod::Pushed,
             )
             .unwrap();
             if accepted {
                 test_debug!("Accepted Nakamoto block {}", &block_id);
                 self.coord.handle_new_nakamoto_stacks_block().unwrap();
+
+                debug!("Begin check Nakamoto block {}", &block.block_id());
+                TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, &block);
+                debug!("End check Nakamoto block {}", &block.block_id());
             } else {
                 test_debug!("Did NOT accept Nakamoto block {}", &block_id);
             }
@@ -1134,4 +1506,670 @@
         self.sortdb = Some(sortdb);
         self.stacks_node = Some(node);
     }
+
+    /// Get the tenure-start block of the parent tenure of `tenure_id_consensus_hash`
+    fn get_parent_tenure_start_header(
+        sortdb: &SortitionDB,
+        chainstate: &mut StacksChainState,
+        tip_block_id: &StacksBlockId,
+        tenure_id_consensus_hash: &ConsensusHash,
+    ) -> StacksHeaderInfo {
+        let Ok(Some(tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header(
+            &mut chainstate.index_conn(),
+            &tip_block_id,
+            tenure_id_consensus_hash,
+        ) else {
+            panic!(
+                "No tenure-start block header for tenure {}",
+                tenure_id_consensus_hash
+            );
+        };
+
+        let Ok(Some((tenure_start_block, _))) = chainstate
+            .nakamoto_blocks_db()
+            .get_nakamoto_block(&tenure_start_header.index_block_hash())
+        else {
+            panic!(
+                "Unable to load tenure-start block {}",
+                &tenure_start_header.index_block_hash()
+            );
+        };
+
+        let Some(tenure_start_tx) = 
tenure_start_block.get_tenure_change_tx_payload() else { + panic!( + "Tenure-start block {} has no tenure-change tx", + &tenure_start_header.index_block_hash() + ); + }; + + let prev_tenure_consensus_hash = &tenure_start_tx.prev_tenure_consensus_hash; + + // get the tenure-start block of the last tenure + let Ok(Some(prev_tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + prev_tenure_consensus_hash, + ) else { + panic!( + "No tenure-start block header for tenure {}", + tenure_id_consensus_hash + ); + }; + + prev_tenure_start_header + } + + /// Get the block-commit for a tenure. It corresponds to the tenure-start block of + /// its parent tenure. + fn get_tenure_block_commit( + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> LeaderBlockCommitOp { + let prev_tenure_start_header = Self::get_parent_tenure_start_header( + sortdb, + chainstate, + tip_block_id, + tenure_id_consensus_hash, + ); + let block_hash = BlockHeaderHash(prev_tenure_start_header.index_block_hash().0); + let Ok(Some(block_commit)) = SortitionDB::get_block_commit_for_stacks_block( + sortdb.conn(), + tenure_id_consensus_hash, + &block_hash, + ) else { + panic!( + "No block-commit for tenure {}: parent tenure-start was {} {:?}", + tenure_id_consensus_hash, + &prev_tenure_start_header.index_block_hash(), + &prev_tenure_start_header + ); + }; + block_commit + } + + /// Load up all blocks from the given block back to the last tenure-change block-found tx + fn load_nakamoto_tenure( + chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, + ) -> Vec { + // count up the number of blocks between `tip_block_id` and its ancestral tenure-change + let mut ancestors = vec![]; + let mut cursor = tip_block_id.clone(); + loop { + let block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&cursor) + .unwrap() + .unwrap() + .0; + cursor = block.header.parent_block_id.clone(); + let is_tenure_change = block.get_tenure_change_tx_payload().is_some(); + ancestors.push(block); + + if is_tenure_change { + break; + } + } + ancestors + } + + /// Check various properties of the chainstate regarding this nakamoto block. 
+ /// Tests: + /// * get_coinbase_height + /// * get_tenure_start_block_header + /// * get_nakamoto_tenure_start_block_header + /// * get_highest_block_header_in_tenure + /// * get_block_vrf_proof + /// * get_nakamoto_tenure_vrf_proof + /// * get_parent_vrf_proof + /// * validate_vrf_seed + /// * check_block_commit_vrf_seed + /// * get_nakamoto_parent_tenure_id_consensus_hash + /// * get_ongoing_tenure + /// * get_block_found_tenure + /// * get_nakamoto_tenure_length + /// * has_processed_nakamoto_tenure + /// * check_nakamoto_tenure + /// * check_tenure_continuity + pub fn check_processed_nakamoto_block( + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + block: &NakamotoBlock, + ) { + let Ok(Some(parent_block_header)) = + NakamotoChainState::get_block_header(chainstate.db(), &block.header.parent_block_id) + else { + panic!("No parent block for {:?}", &block); + }; + + // get_coinbase_height + // Verify that it only increases if the given block has a tenure-change block-found + // transaction + let block_coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &block.block_id(), + ) + .unwrap() + .unwrap(); + let parent_coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &block.header.parent_block_id, + ) + .unwrap() + .unwrap(); + + if let Some(tenure_tx) = block.get_tenure_change_tx_payload() { + // crosses a tenure block-found boundary + assert_eq!(parent_coinbase_height + 1, block_coinbase_height); + } else { + assert_eq!(parent_coinbase_height, block_coinbase_height); + } + + // get_tenure_start_block_header + // Verify that each Nakamoto block's tenure-start header is defined + let Ok(Some(tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash, + ) else { + panic!("No tenure-start block header for {:?}", &block); + }; + + // get_nakamoto_tenure_start_block_header + // Verify that if this tenure_start_header is a Nakamoto block, then we can load it. 
+        // Otherwise, we shouldn't be able to load it
+        if tenure_start_header
+            .anchored_header
+            .as_stacks_nakamoto()
+            .is_some()
+        {
+            assert_eq!(
+                tenure_start_header,
+                NakamotoChainState::get_nakamoto_tenure_start_block_header(
+                    &mut chainstate.index_conn(),
+                    &block.block_id(),
+                    &block.header.consensus_hash
+                )
+                .unwrap()
+                .unwrap()
+            );
+        } else {
+            assert!(NakamotoChainState::get_nakamoto_tenure_start_block_header(
+                &mut chainstate.index_conn(),
+                &block.block_id(),
+                &block.header.consensus_hash
+            )
+            .unwrap()
+            .is_none());
+        }
+
+        // only blocks with a tenure-change block-found transaction are tenure-start blocks
+        if block.get_tenure_change_tx_payload().is_some() {
+            assert_eq!(
+                &block.header,
+                tenure_start_header
+                    .anchored_header
+                    .as_stacks_nakamoto()
+                    .unwrap()
+            );
+        } else {
+            assert_ne!(
+                &block.header,
+                tenure_start_header
+                    .anchored_header
+                    .as_stacks_nakamoto()
+                    .unwrap()
+            );
+        }
+
+        // get highest block header in tenure
+        if tenure_start_header
+            .anchored_header
+            .as_stacks_nakamoto()
+            .is_some()
+        {
+            assert_eq!(
+                &block.header,
+                NakamotoChainState::get_highest_block_header_in_tenure(
+                    &mut chainstate.index_conn(),
+                    &block.block_id(),
+                    &block.header.consensus_hash
+                )
+                .unwrap()
+                .unwrap()
+                .anchored_header
+                .as_stacks_nakamoto()
+                .unwrap()
+            )
+        } else {
+            assert!(NakamotoChainState::get_highest_block_header_in_tenure(
+                &mut chainstate.index_conn(),
+                &block.block_id(),
+                &block.header.consensus_hash
+            )
+            .unwrap()
+            .is_none())
+        }
+
+        // get_block_vrf_proof
+        // Verify that a VRF proof is defined for each tenure
+        let Ok(Some(vrf_proof)) = NakamotoChainState::get_block_vrf_proof(
+            &mut chainstate.index_conn(),
+            &block.block_id(),
+            &block.header.consensus_hash,
+        ) else {
+            panic!(
+                "No VRF proof defined for tenure {}",
+                &block.header.consensus_hash
+            );
+        };
+
+        // get_nakamoto_tenure_vrf_proof
+        // If this is the tenure-start block, then the block VRF proof must be the VRF proof
+        // stored for it in the headers DB. Otherwise, there must not be a VRF proof for this block.
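+        // (The tenure VRF proof is carried by the tenure-start block's coinbase,
+        // which is why only tenure-start blocks should have a proof stored for them.)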
+ if block.get_tenure_change_tx_payload().is_some() { + let Ok(Some(block_vrf_proof)) = NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &block.block_id(), + ) else { + panic!( + "No VRF proof stored for tenure-start block {}: {:?}", + &block.block_id(), + &block + ); + }; + assert_eq!(block_vrf_proof, vrf_proof); + } else { + // this block has no VRF proof defined + assert!(NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &block.block_id() + ) + .unwrap() + .is_none()); + } + + // get_parent_vrf_proof + // The parent VRF proof needs to be the same as the VRF proof for the parent tenure + let parent_tenure_start = Self::get_parent_tenure_start_header( + sortdb, + chainstate, + &block.block_id(), + &block.header.consensus_hash, + ); + let tenure_block_commit = Self::get_tenure_block_commit( + sortdb, + chainstate, + &block.block_id(), + &block.header.consensus_hash, + ); + let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id(), + &sortdb.conn(), + &block.header.consensus_hash, + &tenure_block_commit.txid, + ) + .unwrap(); + + if let Ok(Some(expected_parent_vrf_proof)) = + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &parent_tenure_start.index_block_hash(), + ) + { + assert_eq!(expected_parent_vrf_proof, parent_vrf_proof); + } else if parent_tenure_start + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + panic!( + "No VRF proof stored for parent Nakamoto tenure-start block {}: {:?}", + &parent_tenure_start.index_block_hash(), + &parent_tenure_start + ); + }; + + // get_nakamoto_parent_tenure_id_consensus_hash + // The parent tenure start header must have the parent tenure's consensus hash. + assert_eq!( + NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + parent_tenure_start.consensus_hash + ); + + // get_ongoing_tenure + // changes when we cross _any_ boundary + if let Some(tenure_tx) = block.get_tenure_tx_payload() { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // crosses a tenure block-found or extend boundary + assert_ne!( + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .is_none()); + } + } else { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .is_none()); + } + } + + // get_block_found_tenure + // changes when we cross a tenure-change block-found boundary + if let Some(tenure_tx) = block.get_tenure_change_tx_payload() { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // crosses a tenure block-found 
or extend boundary + assert_ne!( + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .is_none()); + } + } else { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .is_none()); + } + } + + // get_nakamoto_tenure_length + // compare the DB to the block's ancestors + let ancestors = Self::load_nakamoto_tenure(chainstate, &block.block_id()); + assert!(ancestors.len() > 0); + assert_eq!( + ancestors.len(), + NakamotoChainState::get_nakamoto_tenure_length(chainstate.db(), &block.block_id()) + .unwrap() as usize + ); + + // has_processed_nakamoto_tenure + // this tenure is unprocessed as of this block. + // the parent tenure is already processed. + assert!(!NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap()); + if parent_tenure_start + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // MARF stores parent tenure info for Nakamoto + assert!(NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_tenure_start.consensus_hash + ) + .unwrap()); + } else { + // MARF does NOT store parent tenure info for epoch2 + assert!(!NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_tenure_start.consensus_hash + ) + .unwrap()); + } + + // validate_vrf_seed + // Check against the tenure block-commit + assert!(block + .validate_vrf_seed( + sortdb.conn(), + &mut chainstate.index_conn(), + &tenure_block_commit + ) + .is_ok()); + let mut bad_commit = tenure_block_commit.clone(); + bad_commit.new_seed = VRFSeed([0xff; 32]); + assert!(block + .validate_vrf_seed(sortdb.conn(), &mut chainstate.index_conn(), &bad_commit) + .is_err()); + + // check_block_commit_vrf_seed + assert!(NakamotoChainState::check_block_commit_vrf_seed( + &mut chainstate.index_conn(), + sortdb.conn(), + &block + ) + .is_ok()); + + if let Some(tenure_tx) = block.get_tenure_tx_payload() { + if let Some(expected_tenure) = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.header.parent_block_id, + ) + .unwrap() + { + // this block connects to its parent's tenure + assert_eq!( + expected_tenure, + NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + tenure_tx + ) + .unwrap() + .unwrap() + ); + } else { + // this block connects to the last epoch 2.x tenure + assert_eq!( + NakamotoChainState::check_first_nakamoto_tenure_change( + 
chainstate.db(), + tenure_tx + ) + .unwrap() + .unwrap(), + NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + tenure_tx + ) + .unwrap() + .unwrap() + ); + } + + if tenure_tx.cause == TenureChangeCause::BlockFound { + // block-founds are always in new tenures + assert!(!NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } else { + // extends are in the same tenure as their parents + assert!(NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } + + // get a valid but too-old consensus hash + let prev_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tx.prev_tenure_consensus_hash, + ) + .unwrap() + .unwrap(); + let invalid_tenure_sn = + SortitionDB::get_block_snapshot(sortdb.conn(), &prev_tenure_sn.parent_sortition_id) + .unwrap() + .unwrap(); + + // this fails if we change any tenure-identifying fields + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.tenure_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.prev_tenure_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.burn_view_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.previous_tenure_end = + StacksBlockId(prev_tenure_sn.winning_stacks_block_hash.clone().0); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.previous_tenure_blocks = u32::MAX; + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + } else { + assert!(NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } + } } diff --git a/stackslib/src/chainstate/stacks/auth.rs b/stackslib/src/chainstate/stacks/auth.rs index d2981683c0f..06cf64d037e 100644 --- a/stackslib/src/chainstate/stacks/auth.rs +++ b/stackslib/src/chainstate/stacks/auth.rs @@ -23,19 +23,20 @@ use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::StacksAddress; -use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; use 
stacks_common::util::secp256k1::{MessageSignature, MESSAGE_SIGNATURE_ENCODED_SIZE}; use crate::burnchains::{PrivateKey, PublicKey, Txid}; use crate::chainstate::stacks::{ - Error, MultisigHashMode, MultisigSpendingCondition, SinglesigHashMode, - SinglesigSpendingCondition, StacksPrivateKey, StacksPublicKey, TransactionAuth, - TransactionAuthField, TransactionAuthFieldID, TransactionAuthFlags, - TransactionPublicKeyEncoding, TransactionSpendingCondition, - C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Error, MultisigHashMode, MultisigSpendingCondition, OrderIndependentMultisigHashMode, + OrderIndependentMultisigSpendingCondition, SinglesigHashMode, SinglesigSpendingCondition, + StacksPrivateKey, StacksPublicKey, TransactionAuth, TransactionAuthField, + TransactionAuthFieldID, TransactionAuthFlags, TransactionPublicKeyEncoding, + TransactionSpendingCondition, C32_ADDRESS_VERSION_MAINNET_MULTISIG, + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::net::{Error as net_error, STACKS_PUBLIC_KEY_ENCODED_SIZE}; @@ -314,6 +315,204 @@ impl MultisigSpendingCondition { } } +impl StacksMessageCodec for OrderIndependentMultisigSpendingCondition { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &(self.hash_mode.clone() as u8))?; + write_next(fd, &self.signer)?; + write_next(fd, &self.nonce)?; + write_next(fd, &self.tx_fee)?; + write_next(fd, &self.fields)?; + write_next(fd, &self.signatures_required)?; + Ok(()) + } + + fn consensus_deserialize( + fd: &mut R, + ) -> Result { + let hash_mode_u8: u8 = read_next(fd)?; + let hash_mode = OrderIndependentMultisigHashMode::from_u8(hash_mode_u8).ok_or( + codec_error::DeserializeError(format!( + "Failed to parse multisig spending condition: unknown hash mode {}", + hash_mode_u8 + )), + )?; + + let signer: Hash160 = read_next(fd)?; + let nonce: u64 = read_next(fd)?; + let tx_fee: u64 = read_next(fd)?; + let fields: Vec = { + let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + read_next(&mut bound_read) + }?; + + let signatures_required: u16 = read_next(fd)?; + + // read and decode _exactly_ num_signatures signature buffers + let mut num_sigs_given: u16 = 0; + let mut have_uncompressed = false; + for f in fields.iter() { + match *f { + TransactionAuthField::Signature(ref key_encoding, _) => { + num_sigs_given = + num_sigs_given + .checked_add(1) + .ok_or(codec_error::DeserializeError( + "Failed to parse order independent multisig spending condition: too many signatures" + .to_string(), + ))?; + if *key_encoding == TransactionPublicKeyEncoding::Uncompressed { + have_uncompressed = true; + } + } + TransactionAuthField::PublicKey(ref pubk) => { + if !pubk.compressed() { + have_uncompressed = true; + } + } + }; + } + + // must be given the right number of signatures + if num_sigs_given < signatures_required { + let msg = format!( + "Failed to deserialize order independent multisig spending condition: got {num_sigs_given} sigs, expected at least {signatures_required}" + ); + test_debug!("{msg}"); + return Err(codec_error::DeserializeError(msg)); + } + + // must all be compressed if we're using P2WSH + if have_uncompressed && hash_mode == OrderIndependentMultisigHashMode::P2WSH { + let msg = format!( + "Failed to deserialize order independent multisig spending condition: expected compressed keys only" + ); + 
test_debug!("{msg}"); + return Err(codec_error::DeserializeError(msg)); + } + + Ok(OrderIndependentMultisigSpendingCondition { + signer, + nonce, + tx_fee, + hash_mode, + fields, + signatures_required, + }) + } +} + +impl OrderIndependentMultisigSpendingCondition { + pub fn push_signature( + &mut self, + key_encoding: TransactionPublicKeyEncoding, + signature: MessageSignature, + ) -> () { + self.fields + .push(TransactionAuthField::Signature(key_encoding, signature)); + } + + pub fn push_public_key(&mut self, public_key: StacksPublicKey) -> () { + self.fields + .push(TransactionAuthField::PublicKey(public_key)); + } + + pub fn pop_auth_field(&mut self) -> Option { + self.fields.pop() + } + + pub fn address_mainnet(&self) -> StacksAddress { + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: self.signer.clone(), + } + } + + pub fn address_testnet(&self) -> StacksAddress { + StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_MULTISIG, + bytes: self.signer.clone(), + } + } + + /// Authenticate a spending condition against an initial sighash. + /// In doing so, recover all public keys and verify that they hash to the signer + /// via the given hash mode. + pub fn verify( + &self, + initial_sighash: &Txid, + cond_code: &TransactionAuthFlags, + ) -> Result { + let mut pubkeys = vec![]; + let mut num_sigs: u16 = 0; + let mut have_uncompressed = false; + for field in self.fields.iter() { + let pubkey = match field { + TransactionAuthField::PublicKey(ref pubkey) => { + if !pubkey.compressed() { + have_uncompressed = true; + } + pubkey.clone() + } + TransactionAuthField::Signature(ref pubkey_encoding, ref sigbuf) => { + if *pubkey_encoding == TransactionPublicKeyEncoding::Uncompressed { + have_uncompressed = true; + } + + let (pubkey, _next_sighash) = TransactionSpendingCondition::next_verification( + &initial_sighash, + cond_code, + self.tx_fee, + self.nonce, + pubkey_encoding, + sigbuf, + )?; + num_sigs = num_sigs + .checked_add(1) + .ok_or(net_error::VerifyingError("Too many signatures".to_string()))?; + pubkey + } + }; + pubkeys.push(pubkey); + } + + if num_sigs < self.signatures_required { + return Err(net_error::VerifyingError(format!( + "Not enough signatures. 
Got {num_sigs}, expected at least {req}", + req = self.signatures_required + ))); + } + + if have_uncompressed && self.hash_mode == OrderIndependentMultisigHashMode::P2WSH { + return Err(net_error::VerifyingError( + "Uncompressed keys are not allowed in this hash mode".to_string(), + )); + } + + let addr_bytes = match StacksAddress::from_public_keys( + 0, + &self.hash_mode.to_address_hash_mode(), + self.signatures_required as usize, + &pubkeys, + ) { + Some(a) => a.bytes, + None => { + return Err(net_error::VerifyingError( + "Failed to generate address from public keys".to_string(), + )); + } + }; + + if addr_bytes != self.signer { + return Err(net_error::VerifyingError(format!( + "Signer hash does not equal hash of public key(s): {} != {}", + addr_bytes, self.signer + ))); + } + + Ok(initial_sighash.clone()) + } +} + impl StacksMessageCodec for SinglesigSpendingCondition { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(self.hash_mode.clone() as u8))?; @@ -461,6 +660,9 @@ impl StacksMessageCodec for TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref data) => { data.consensus_serialize(fd)?; } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.consensus_serialize(fd)?; + } } Ok(()) } @@ -479,6 +681,10 @@ impl StacksMessageCodec for TransactionSpendingCondition { } else if MultisigHashMode::from_u8(hash_mode_u8).is_some() { let cond = MultisigSpendingCondition::consensus_deserialize(&mut rrd)?; TransactionSpendingCondition::Multisig(cond) + } else if OrderIndependentMultisigHashMode::from_u8(hash_mode_u8).is_some() { + let cond = + OrderIndependentMultisigSpendingCondition::consensus_deserialize(&mut rrd)?; + TransactionSpendingCondition::OrderIndependentMultisig(cond) } else { test_debug!("Invalid address hash mode {}", hash_mode_u8); return Err(codec_error::DeserializeError(format!( @@ -504,11 +710,11 @@ impl TransactionSpendingCondition { Some(TransactionSpendingCondition::Singlesig( SinglesigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: SinglesigHashMode::P2PKH, - key_encoding: key_encoding, + key_encoding, signature: MessageSignature::empty(), }, )) @@ -524,7 +730,7 @@ impl TransactionSpendingCondition { Some(TransactionSpendingCondition::Singlesig( SinglesigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: SinglesigHashMode::P2WPKH, @@ -541,13 +747,13 @@ impl TransactionSpendingCondition { let signer_addr = StacksAddress::from_public_keys( 0, &AddressHashMode::SerializeP2SH, - num_sigs as usize, + usize::from(num_sigs), &pubkeys, )?; Some(TransactionSpendingCondition::Multisig( MultisigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: MultisigHashMode::P2SH, @@ -557,6 +763,52 @@ impl TransactionSpendingCondition { )) } + pub fn new_multisig_order_independent_p2sh( + num_sigs: u16, + pubkeys: Vec, + ) -> Option { + let signer_addr = StacksAddress::from_public_keys( + 0, + &AddressHashMode::SerializeP2SH, + usize::from(num_sigs), + &pubkeys, + )?; + + Some(TransactionSpendingCondition::OrderIndependentMultisig( + OrderIndependentMultisigSpendingCondition { + signer: signer_addr.bytes, + nonce: 0, + tx_fee: 0, + hash_mode: OrderIndependentMultisigHashMode::P2SH, + fields: vec![], + signatures_required: num_sigs, + }, + )) + } + + pub fn new_multisig_order_independent_p2wsh( + num_sigs: u16, + 
pubkeys: Vec, + ) -> Option { + let signer_addr = StacksAddress::from_public_keys( + 0, + &AddressHashMode::SerializeP2WSH, + usize::from(num_sigs), + &pubkeys, + )?; + + Some(TransactionSpendingCondition::OrderIndependentMultisig( + OrderIndependentMultisigSpendingCondition { + signer: signer_addr.bytes, + nonce: 0, + tx_fee: 0, + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + fields: vec![], + signatures_required: num_sigs, + }, + )) + } + pub fn new_multisig_p2wsh( num_sigs: u16, pubkeys: Vec, @@ -564,13 +816,13 @@ impl TransactionSpendingCondition { let signer_addr = StacksAddress::from_public_keys( 0, &AddressHashMode::SerializeP2WSH, - num_sigs as usize, + usize::from(num_sigs), &pubkeys, )?; Some(TransactionSpendingCondition::Multisig( MultisigSpendingCondition { - signer: signer_addr.bytes.clone(), + signer: signer_addr.bytes, nonce: 0, tx_fee: 0, hash_mode: MultisigHashMode::P2WSH, @@ -614,6 +866,17 @@ impl TransactionSpendingCondition { } num_sigs } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut num_sigs: u16 = 0; + for field in data.fields.iter() { + if field.is_signature() { + num_sigs = num_sigs + .checked_add(1) + .expect("Unreasonable amount of signatures"); // something is seriously wrong if this fails + } + } + num_sigs + } } } @@ -623,6 +886,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref multisig_data) => { multisig_data.signatures_required } + TransactionSpendingCondition::OrderIndependentMultisig(ref multisig_data) => { + multisig_data.signatures_required + } } } @@ -630,6 +896,7 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.nonce, TransactionSpendingCondition::Multisig(ref data) => data.nonce, + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => data.nonce, } } @@ -637,6 +904,7 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.tx_fee, TransactionSpendingCondition::Multisig(ref data) => data.tx_fee, + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => data.tx_fee, } } @@ -648,6 +916,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref mut multisig_data) => { multisig_data.nonce = n; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + multisig_data.nonce = n; + } } } @@ -659,6 +930,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref mut multisig_data) => { multisig_data.tx_fee = tx_fee; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + multisig_data.tx_fee = tx_fee; + } } } @@ -666,6 +940,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref singlesig_data) => singlesig_data.tx_fee, TransactionSpendingCondition::Multisig(ref multisig_data) => multisig_data.tx_fee, + TransactionSpendingCondition::OrderIndependentMultisig(ref multisig_data) => { + multisig_data.tx_fee + } } } @@ -674,6 +951,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.address_mainnet(), TransactionSpendingCondition::Multisig(ref data) => data.address_mainnet(), + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.address_mainnet() + } } } @@ -682,6 +962,9 @@ impl TransactionSpendingCondition { match *self { TransactionSpendingCondition::Singlesig(ref data) => data.address_testnet(), 
TransactionSpendingCondition::Multisig(ref data) => data.address_testnet(), + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.address_testnet() + } } } @@ -707,6 +990,11 @@ impl TransactionSpendingCondition { multisig_data.nonce = 0; multisig_data.fields.clear(); } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut multisig_data) => { + multisig_data.tx_fee = 0; + multisig_data.nonce = 0; + multisig_data.fields.clear(); + } } } @@ -842,6 +1130,9 @@ impl TransactionSpendingCondition { TransactionSpendingCondition::Multisig(ref data) => { data.verify(initial_sighash, cond_code) } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + data.verify(initial_sighash, cond_code) + } } } } @@ -908,6 +1199,26 @@ impl TransactionAuth { } } + pub fn from_order_independent_p2sh( + privks: &[StacksPrivateKey], + num_sigs: u16, + ) -> Option { + let pubks = privks.iter().map(StacksPublicKey::from_private).collect(); + + TransactionSpendingCondition::new_multisig_order_independent_p2sh(num_sigs, pubks) + .map(TransactionAuth::Standard) + } + + pub fn from_order_independent_p2wsh( + privks: &[StacksPrivateKey], + num_sigs: u16, + ) -> Option { + let pubks = privks.iter().map(StacksPublicKey::from_private).collect(); + + TransactionSpendingCondition::new_multisig_order_independent_p2wsh(num_sigs, pubks) + .map(TransactionAuth::Standard) + } + pub fn from_p2wpkh(privk: &StacksPrivateKey) -> Option { match TransactionSpendingCondition::new_singlesig_p2wpkh(StacksPublicKey::from_private( privk, @@ -1076,10 +1387,40 @@ impl TransactionAuth { } } } + + /// Checks if this TransactionAuth is supported in the passed epoch + /// OrderIndependent multisig is not supported before epoch 3.0 + pub fn is_supported_in_epoch(&self, epoch_id: StacksEpochId) -> bool { + match &self { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + let origin_supported = match origin { + TransactionSpendingCondition::OrderIndependentMultisig(..) => { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }; + let sponsor_supported = match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(..) => { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }; + origin_supported && sponsor_supported + } + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(..) 
=> { + epoch_id >= StacksEpochId::Epoch30 + } + _ => true, + }, + } + } } +#[rustfmt::skip] #[cfg(test)] mod test { + use stacks_common::types::StacksEpochId::Epoch30; use super::*; use crate::chainstate::stacks::{StacksPublicKey as PubKey, *}; use crate::net::codec::test::check_codec_and_corruption; @@ -1102,112 +1443,15 @@ mod test { // hash mode SinglesigHashMode::P2PKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // key encoding, TransactionPublicKeyEncoding::Uncompressed as u8, // signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, ]; let spending_condition_p2pkh_compressed = SinglesigSpendingCondition { @@ -1223,112 +1467,15 @@ mod test { // hash mode SinglesigHashMode::P2PKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0x59, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x59, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // key encoding TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, ]; let spending_conditions = vec![ @@ -1368,224 +1515,27 @@ mod test { // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureUncompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureUncompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyUncompressed as u8, - // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + // field #3: key (uncompressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures required - 0x00, - 0x02, + 0x00, 0x02, ]; let spending_condition_p2sh_compressed = MultisigSpendingCondition { @@ -1616,224 
+1566,27 @@ mod test { // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureCompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x02, + 0x00, 0x02, ]; let spending_conditions = vec![ @@ -1854,144 +1607,51 @@ mod test { } #[test] - fn tx_stacks_spending_condition_p2wpkh() { - let spending_condition_p2wpkh_compressed = SinglesigSpendingCondition { + fn 
tx_stacks_spending_condition_order_independent_p2sh() { + // order independent p2sh + let spending_condition_order_independent_p2sh_uncompressed = OrderIndependentMultisigSpendingCondition { signer: Hash160([0x11; 20]), - hash_mode: SinglesigHashMode::P2WPKH, - key_encoding: TransactionPublicKeyEncoding::Compressed, - nonce: 345, - tx_fee: 567, - signature: MessageSignature::from_raw(&vec![0xfe; 65]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 456, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + signatures_required: 2 }; - let spending_condition_p2wpkh_compressed_bytes = vec![ + let spending_condition_order_independent_p2sh_uncompressed_bytes = vec![ // hash mode - SinglesigHashMode::P2WPKH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0x59, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // key encoding - TransactionPublicKeyEncoding::Compressed as u8, - // signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureUncompressed as u8, + // field #1: signature + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureUncompressed as u8, + // filed #2: signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyUncompressed as u8, + // field #3: key (uncompressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 
0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures required + 0x00, 0x02, ]; - let spending_conditions = vec![spending_condition_p2wpkh_compressed]; - let spending_conditions_bytes = vec![spending_condition_p2wpkh_compressed_bytes]; - - for i in 0..spending_conditions.len() { - check_codec_and_corruption::( - &spending_conditions[i], - &spending_conditions_bytes[i], - ); - } - } - - #[test] - fn tx_stacks_spending_condition_p2wsh() { - let spending_condition_p2wsh = MultisigSpendingCondition { + let spending_condition_order_independent_p2sh_compressed = OrderIndependentMultisigSpendingCondition { signer: Hash160([0x11; 20]), - hash_mode: MultisigHashMode::P2WSH, + hash_mode: OrderIndependentMultisigHashMode::P2SH, nonce: 456, tx_fee: 567, fields: vec![ @@ -2007,234 +1667,144 @@ mod test { PubKey::from_hex( "03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77", ) - .unwrap(), + .unwrap(), ), ], signatures_required: 2, }; - let spending_condition_p2wsh_bytes = vec![ + let spending_condition_order_independent_p2sh_compressed_bytes = vec![ // hash mode - MultisigHashMode::P2WSH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // fields length - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // field #1: signature TransactionAuthFieldID::SignatureCompressed as u8, // field #1: signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x02, + 0x00, 0x02, + ]; + + let spending_conditions = vec![ + spending_condition_order_independent_p2sh_compressed, + spending_condition_order_independent_p2sh_uncompressed, + ]; + let spending_conditions_bytes = vec![ + spending_condition_order_independent_p2sh_compressed_bytes, + spending_condition_order_independent_p2sh_uncompressed_bytes, + ]; + + for i in 0..spending_conditions.len() { + check_codec_and_corruption::( + &spending_conditions[i], + &spending_conditions_bytes[i], + ); + } + } + + #[test] + fn tx_stacks_spending_condition_p2wpkh() { + let spending_condition_p2wpkh_compressed = SinglesigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: SinglesigHashMode::P2WPKH, + key_encoding: TransactionPublicKeyEncoding::Compressed, + nonce: 345, + tx_fee: 567, + signature: MessageSignature::from_raw(&vec![0xfe; 65]), + }; + + let spending_condition_p2wpkh_compressed_bytes = vec![ + // hash mode + SinglesigHashMode::P2WPKH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x59, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // key encoding + TransactionPublicKeyEncoding::Compressed as u8, + // signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + ]; + + let spending_conditions = vec![spending_condition_p2wpkh_compressed]; + let spending_conditions_bytes = vec![spending_condition_p2wpkh_compressed_bytes]; + + for i in 0..spending_conditions.len() { + check_codec_and_corruption::( + &spending_conditions[i], + &spending_conditions_bytes[i], + ); + } + } + + #[test] + fn tx_stacks_spending_condition_p2wsh() { + let spending_condition_p2wsh = MultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: MultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature( + TransactionPublicKeyEncoding::Compressed, + MessageSignature::from_raw(&vec![0xff; 65]), + ), + TransactionAuthField::Signature( + TransactionPublicKeyEncoding::Compressed, + MessageSignature::from_raw(&vec![0xfe; 65]), + ), + TransactionAuthField::PublicKey( + PubKey::from_hex( + 
"03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77", + ) + .unwrap(), + ), + ], + signatures_required: 2, + }; + + let spending_condition_p2wsh_bytes = vec![ + // hash mode + MultisigHashMode::P2WSH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x02, ]; let spending_conditions = vec![spending_condition_p2wsh]; @@ -2292,6 +1862,54 @@ mod test { ], signatures_required: 2 }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 123, + tx_fee: 
567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 + }), TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { signer: Hash160([0x11; 20]), hash_mode: SinglesigHashMode::P2WPKH, @@ -2311,6 +1929,30 @@ mod test { TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) ], signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfd; 65])), + ], + signatures_required: 1 }) ]; @@ -2349,799 +1991,151 @@ mod test { // hash mode 0xff, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // key encoding, TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 
0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; let bad_hash_mode_multisig_bytes = vec![ // hash mode MultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // key encoding, TransactionPublicKeyEncoding::Compressed as u8, // signature - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - 0xfd, - ]; - - // this will parse into a singlesig spending condition, but data will still remain. 
- // the reason it parses is because the public keys length field encodes a valid 2-byte - // prefix of a public key, and the parser will lump it into a public key - let bad_hash_mode_singlesig_bytes_parseable = vec![ - // hash mode - SinglesigHashMode::P2PKH as u8, - // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - // nonce (embeds key encoding and part of the parsed nonce) - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, - // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // number of fields (embed part of the signature) - 0x00, - 0x00, - 0x00, - 0x01, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - // number of signatures - 0x00, - 0x01, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; - // wrong number of public keys (too many signatures) - let bad_public_key_count_bytes = vec![ + let bad_hash_mode_order_independent_multisig_bytes = vec![ // hash mode - MultisigHashMode::P2SH as u8, + OrderIndependentMultisigHashMode::P2SH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // fields length - 0x00, - 0x00, - 0x00, - 0x03, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - // field #2: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // filed #2: signature - 0x02, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 
0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - // field #3: public key - TransactionAuthFieldID::PublicKeyCompressed as u8, - // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 0xbc, - 0x5c, - 0x77, - // number of signatures - 0x00, - 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // key encoding, + TransactionPublicKeyEncoding::Compressed as u8, + // signature + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, ]; - // wrong number of public keys (not enough signatures) - let bad_public_key_count_bytes_2 = vec![ - // hash mode - MultisigHashMode::P2SH as u8, - // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, - // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, - // fields length - 0x00, - 0x00, - 0x00, - 0x03, - // field #1: signature - TransactionAuthFieldID::SignatureCompressed as u8, - // field #1: signature - 0x01, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + // this will parse into a singlesig spending condition, but data will still remain. 
+ // the reason it parses is because the public keys length field encodes a valid 2-byte + // prefix of a public key, and the parser will lump it into a public key + let bad_hash_mode_singlesig_bytes_parseable = vec![ + // hash mode + SinglesigHashMode::P2PKH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce (embeds key encoding and part of the parsed nonce) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // number of fields (embed part of the signature) + 0x00, 0x00, 0x00, 0x01, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures + 0x00, 0x01, + ]; + + // wrong number of public keys (too many signatures) + let bad_public_key_count_bytes = vec![ + // hash mode + MultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x01, + ]; + + // wrong number of public keys (not enough signatures) + let bad_public_key_count_bytes_2 = vec![ + // hash mode + MultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // field #2: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // filed #2: signature + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // field #3: public key + TransactionAuthFieldID::PublicKeyCompressed as u8, + // field #3: key (compressed) + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, + // number of signatures + 0x00, 0x03, + ]; + + // wrong number of public keys (not enough signatures) + let bad_public_key_count_bytes_3 = vec![ + // hash mode + OrderIndependentMultisigHashMode::P2SH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // fields length + 0x00, 0x00, 0x00, 0x03, + // field #1: signature + TransactionAuthFieldID::SignatureCompressed as u8, + // field #1: signature + 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // field #2: signature TransactionAuthFieldID::SignatureCompressed as u8, // filed #2: signature - 0x02, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0x02, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // field #3: public key TransactionAuthFieldID::PublicKeyCompressed as u8, // field #3: key (compressed) - 0x03, - 0xef, - 0x23, - 0x40, - 0x51, - 0x8b, - 0x58, - 0x67, - 0xb2, - 0x35, - 0x98, - 0xa9, - 0xcf, - 0x74, - 0x61, - 0x1f, - 0x8b, - 0x98, - 0x06, - 0x4f, - 0x7d, - 0x55, - 0xcd, - 0xb8, - 0xc1, - 0x07, - 0xc6, - 0x7b, - 0x5e, - 0xfc, - 
0xbc, - 0x5c, - 0x77, + 0x03, 0xef, 0x23, 0x40, 0x51, 0x8b, 0x58, 0x67, 0xb2, 0x35, 0x98, 0xa9, 0xcf, 0x74, 0x61, 0x1f, 0x8b, 0x98, 0x06, 0x4f, 0x7d, 0x55, 0xcd, 0xb8, 0xc1, 0x07, 0xc6, 0x7b, 0x5e, 0xfc, 0xbc, 0x5c, 0x77, // number of signatures - 0x00, - 0x03, + 0x00, 0x03, ]; // hashing mode doesn't allow uncompressed keys @@ -3159,112 +2153,15 @@ mod test { // hash mode SinglesigHashMode::P2WPKH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x7b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // public key uncompressed TransactionPublicKeyEncoding::Uncompressed as u8, // signature - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, ]; // hashing mode doesn't allow uncompressed keys @@ -3285,221 +2182,62 @@ mod test { // hash mode MultisigHashMode::P2WSH as u8, // signer - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, - 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, // nonce - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x01, - 0xc8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, // fee rate - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x00, - 0x02, - 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, // number of fields - 0x00, - 0x00, - 0x00, - 0x03, + 0x00, 0x00, 0x00, 0x03, // signature TransactionAuthFieldID::SignatureUncompressed as u8, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // signature TransactionAuthFieldID::SignatureUncompressed as u8, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, - 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, // key TransactionAuthFieldID::PublicKeyUncompressed as u8, - 0x02, - 0xb7, - 0xe1, - 0x0d, - 0xd2, - 0xc0, - 0x2d, - 0xec, - 0x64, - 0x88, - 0x80, - 0xea, - 0x34, - 0x6e, - 0xce, - 0x86, - 0xa7, - 0x82, - 0x0c, - 0x4f, - 0xa5, - 0x11, - 0x4f, - 0xb5, - 0x00, - 0xb2, - 0x64, - 0x5f, - 0x6c, - 0x97, - 0x20, - 0x92, - 0xdb, + 0x02, 0xb7, 0xe1, 0x0d, 0xd2, 0xc0, 0x2d, 0xec, 0x64, 0x88, 0x80, 0xea, 0x34, 0x6e, 0xce, 0x86, 0xa7, 0x82, 0x0c, 0x4f, 0xa5, 0x11, 0x4f, 0xb5, 0x00, 0xb2, 0x64, 0x5f, 0x6c, 0x97, 0x20, 0x92, 0xdb, // signatures - 0x00, - 0x02, + 0x00, 0x02, + ]; + + // hashing mode doesn't allow uncompressed keys + let bad_order_independent_p2wsh_uncompressed = TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 456, + tx_fee: 567, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("04b7e10dd2c02dec648880ea346ece86a7820c4fa5114fb500b2645f6c972092dbe2334a653db0ab8d8ccffa6c35d3919e4cf8da3aeedafc7b9eb8235d0f2e7fdc").unwrap()), + ], + signatures_required: 2 + }); + + let bad_order_independent_p2wsh_uncompressed_bytes = vec![ + // hash mode + OrderIndependentMultisigHashMode::P2WSH as u8, + // signer + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + // nonce + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + // fee rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x37, + // number of fields + 0x00, 0x00, 0x00, 0x03, + // signature + TransactionAuthFieldID::SignatureUncompressed as u8, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // signature + TransactionAuthFieldID::SignatureUncompressed as u8, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + // key + TransactionAuthFieldID::PublicKeyUncompressed as u8, + 0x02, 0xb7, 0xe1, 0x0d, 0xd2, 0xc0, 0x2d, 0xec, 0x64, 0x88, 0x80, 0xea, 0x34, 0x6e, 0xce, 0x86, 0xa7, 0x82, 0x0c, 0x4f, 0xa5, 0x11, 0x4f, 0xb5, 0x00, 0xb2, 0x64, 0x5f, 0x6c, 0x97, 0x20, 0x92, 0xdb, + // signatures + 0x00, 0x02, ]; // we can serialize the invalid p2wpkh uncompressed condition, but we can't deserialize it @@ -3516,6 +2254,13 @@ mod test { .unwrap(); assert_eq!(actual_bytes, bad_p2wsh_uncompressed_bytes); + // we can serialize the invalid p2wsh uncompressed condition, but we can't deserialize it + let mut actual_bytes = vec![]; + bad_order_independent_p2wsh_uncompressed + .consensus_serialize(&mut actual_bytes) + .unwrap(); + assert_eq!(actual_bytes, bad_order_independent_p2wsh_uncompressed_bytes); + assert!(TransactionSpendingCondition::consensus_deserialize( &mut &bad_public_key_count_bytes[..] ) @@ -3524,6 +2269,10 @@ mod test { &mut &bad_public_key_count_bytes_2[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_public_key_count_bytes_3[..] + ) + .is_err()); assert!( TransactionSpendingCondition::consensus_deserialize(&mut &bad_hash_mode_bytes[..]) .is_err() @@ -3532,6 +2281,10 @@ mod test { &mut &bad_hash_mode_multisig_bytes[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_hash_mode_order_independent_multisig_bytes[..] + ) + .is_err()); assert!(TransactionSpendingCondition::consensus_deserialize( &mut &bad_p2wpkh_uncompressed_bytes[..] ) @@ -3540,6 +2293,10 @@ mod test { &mut &bad_p2wsh_uncompressed_bytes[..] ) .is_err()); + assert!(TransactionSpendingCondition::consensus_deserialize( + &mut &bad_order_independent_p2wsh_uncompressed_bytes[..] 
+ ) + .is_err()); // corrupt but will parse with trailing bits assert!(TransactionSpendingCondition::consensus_deserialize( @@ -3633,4 +2390,90 @@ mod test { assert_eq!(next_pubkey, StacksPublicKey::from_private(&keys[i])); } } + + fn tx_auth_check_all_epochs( + auth: TransactionAuth, + activation_epoch_id: Option, + ) { + let epoch_list = [ + StacksEpochId::Epoch10, + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + StacksEpochId::Epoch30, + ]; + + for epoch_id in epoch_list.iter() { + if activation_epoch_id.is_none() { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), true); + } else if activation_epoch_id.unwrap() > *epoch_id { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), false); + } else { + assert_eq!(auth.is_supported_in_epoch(*epoch_id), true); + } + } + } + + #[test] + fn tx_auth_is_supported_in_epoch() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ).unwrap(); + + let privk_2 = StacksPrivateKey::from_hex( + "7e3af4db6af6b3c67e2c6c6d7d5983b519f4d9b3a6e00580ae96dcace3bde8bc01", + ).unwrap(); + + let auth_p2pkh = TransactionAuth::from_p2pkh(&privk_1).unwrap(); + let auth_sponsored_p2pkh = auth_p2pkh.clone().into_sponsored( + TransactionAuth::from_p2pkh(&privk_2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2pkh, None); + tx_auth_check_all_epochs(auth_sponsored_p2pkh, None); + + let auth_p2wpkh = TransactionAuth::from_p2wpkh(&privk_1).unwrap(); + let auth_sponsored_p2wpkh = auth_p2wpkh.clone().into_sponsored( + TransactionAuth::from_p2wpkh(&privk_2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2wpkh, None); + tx_auth_check_all_epochs(auth_sponsored_p2wpkh, None); + + let auth_p2sh = TransactionAuth::from_p2sh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_p2sh = auth_p2sh.clone().into_sponsored( + TransactionAuth::from_p2sh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2sh, None); + tx_auth_check_all_epochs(auth_sponsored_p2sh, None); + + let auth_p2wsh = TransactionAuth::from_p2wsh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_p2wsh = auth_p2wsh.clone().into_sponsored( + TransactionAuth::from_p2wsh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_p2wsh, None); + tx_auth_check_all_epochs(auth_sponsored_p2wsh, None); + + let auth_order_independent_p2sh = TransactionAuth::from_order_independent_p2sh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_order_independent_p2sh = auth_order_independent_p2sh.clone().into_sponsored( + TransactionAuth::from_order_independent_p2sh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_order_independent_p2sh, Some(StacksEpochId::Epoch30)); + tx_auth_check_all_epochs(auth_sponsored_order_independent_p2sh, Some(StacksEpochId::Epoch30)); + + let auth_order_independent_p2wsh = TransactionAuth::from_order_independent_p2wsh(&[privk_1, privk_2], 2).unwrap(); + let auth_sponsored_order_independent_p2wsh = auth_order_independent_p2wsh.clone().into_sponsored( + TransactionAuth::from_order_independent_p2wsh(&[privk_1, privk_2], 2).unwrap() + ).unwrap(); + + tx_auth_check_all_epochs(auth_order_independent_p2wsh, Some(StacksEpochId::Epoch30)); + tx_auth_check_all_epochs(auth_sponsored_order_independent_p2wsh, Some(StacksEpochId::Epoch30)); + } } diff --git a/stackslib/src/chainstate/stacks/block.rs 
b/stackslib/src/chainstate/stacks/block.rs index 2932231103a..6ede2bc8e68 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -21,7 +21,8 @@ use std::io::{Read, Write}; use sha2::{Digest, Sha512_256}; use stacks_common::codec::{ - read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, + read_next, read_next_at_most, write_next, Error as codec_error, StacksMessageCodec, + MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksWorkScore, TrieHash, VRFSeed, @@ -308,7 +309,7 @@ impl StacksMessageCodec for StacksBlock { let header: StacksBlockHeader = read_next(fd)?; let txs: Vec = { let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); - read_next(&mut bound_read) + read_next_at_most(&mut bound_read, u32::MAX) }?; // there must be at least one transaction (the coinbase) @@ -569,37 +570,52 @@ impl StacksBlock { epoch_id: StacksEpochId, ) -> bool { for tx in txs.iter() { - if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { - if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { - // not supported - error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); - return false; - } - if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { - // not supported - error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); - return false; - } - if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { - // not supported - error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); - return false; - } + if !StacksBlock::validate_transaction_static_epoch(tx, epoch_id) { + return false; } - if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { - if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { - // not supported - error!("Versioned smart contracts not supported before Stacks 2.1"); - return false; - } + } + return true; + } + + /// Verify that one transaction is supported in the given epoch, as indicated by `epoch_id` + pub fn validate_transaction_static_epoch( + tx: &StacksTransaction, + epoch_id: StacksEpochId, + ) -> bool { + if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { + if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } - if let TransactionPayload::TenureChange(..) = &tx.payload { - if epoch_id < StacksEpochId::Epoch30 { - error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid()); - return false; - } + if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); + return false; + } + if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); + return false; + } + } + if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { + if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Versioned smart contracts not supported before Stacks 2.1"); + return false; } } + if let TransactionPayload::TenureChange(..) 
= &tx.payload {
+            if epoch_id < StacksEpochId::Epoch30 {
+                error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid());
+                return false;
+            }
+        }
+        if !tx.auth.is_supported_in_epoch(epoch_id) {
+            error!("Authentication mode not supported in Epoch {epoch_id}");
+            return false;
+        }
         return true;
     }
@@ -1061,7 +1077,7 @@ mod test {
             signature: MessageSignature([0x0cu8; 65]),
         };
 
-        let mut block = make_codec_test_block(100000000);
+        let mut block = make_codec_test_block(100000000, StacksEpochId::latest());
         block.header.version = 0x24;
 
         let ph = block.header.parent_block.as_bytes().to_vec();
@@ -1125,6 +1141,7 @@ mod test {
             0x80000000,
             &TransactionAnchorMode::OffChainOnly,
             &TransactionPostConditionMode::Allow,
+            StacksEpochId::latest(),
         );
 
         // remove all coinbases
@@ -1294,6 +1311,7 @@ mod test {
         let mut block_commit = LeaderBlockCommitOp {
             sunset_burn: 0,
+            treatment: vec![],
             block_header_hash: header.block_hash(),
             new_seed: VRFSeed::from_proof(&header.proof),
             parent_block_ptr: 0,
@@ -1672,6 +1690,103 @@ mod test {
         }
     }
 
+    fn verify_block_epoch_validation(
+        txs: &[StacksTransaction],
+        tx_coinbase_old: Option<StacksTransaction>,
+        tx_coinbase_nakamoto: Option<StacksTransaction>,
+        activation_epoch_id: StacksEpochId,
+        header: StacksBlockHeader,
+        deactivation_epoch_id: Option<StacksEpochId>,
+    ) {
+        let epoch_list = [
+            StacksEpochId::Epoch10,
+            StacksEpochId::Epoch20,
+            StacksEpochId::Epoch2_05,
+            StacksEpochId::Epoch21,
+            StacksEpochId::Epoch22,
+            StacksEpochId::Epoch23,
+            StacksEpochId::Epoch24,
+            StacksEpochId::Epoch25,
+            StacksEpochId::Epoch30,
+        ];
+        let get_tx_root = |txs: &Vec<StacksTransaction>| {
+            let txid_vecs = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect();
+
+            let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs);
+            let tx_merkle_root = merkle_tree.root();
+            tx_merkle_root
+        };
+        let mut block_header_dup_tx = header.clone();
+        block_header_dup_tx.tx_merkle_root = get_tx_root(&txs.to_vec());
+
+        let block = StacksBlock {
+            header: block_header_dup_tx.clone(),
+            txs: txs.to_vec(),
+        };
+
+        let block_with_coinbase_tx = tx_coinbase_old.map(|coinbase| {
+            let mut txs_with_coinbase = txs.to_vec();
+            txs_with_coinbase.insert(0, coinbase);
+
+            let mut block_header_dup_tx_with_coinbase = header.clone();
+            block_header_dup_tx_with_coinbase.tx_merkle_root =
+                get_tx_root(&txs_with_coinbase.to_vec());
+
+            StacksBlock {
+                header: block_header_dup_tx_with_coinbase.clone(),
+                txs: txs_with_coinbase,
+            }
+        });
+
+        let block_with_coinbase_tx_nakamoto = tx_coinbase_nakamoto.map(|coinbase| {
+            let mut txs_with_coinbase_nakamoto = txs.to_vec();
+            txs_with_coinbase_nakamoto.insert(0, coinbase);
+
+            let mut block_header_dup_tx_with_coinbase_nakamoto = header.clone();
+            block_header_dup_tx_with_coinbase_nakamoto.tx_merkle_root =
+                get_tx_root(&txs_with_coinbase_nakamoto.to_vec());
+
+            StacksBlock {
+                header: block_header_dup_tx_with_coinbase_nakamoto.clone(),
+                txs: txs_with_coinbase_nakamoto,
+            }
+        });
+
+        for epoch_id in epoch_list.iter() {
+            let block_to_check = if *epoch_id >= StacksEpochId::Epoch30
+                && block_with_coinbase_tx_nakamoto.is_some()
+            {
+                block_with_coinbase_tx_nakamoto.clone().unwrap()
+            } else if *epoch_id >= StacksEpochId::Epoch21
+                && *epoch_id < StacksEpochId::Epoch30
+                && block_with_coinbase_tx.is_some()
+            {
+                block_with_coinbase_tx.clone().unwrap()
+            } else {
+                block.clone()
+            };
+
+            let mut bytes: Vec<u8> = vec![];
+            block_to_check.consensus_serialize(&mut bytes).unwrap();
+
+            if *epoch_id < activation_epoch_id {
+                assert!(!StacksBlock::validate_transactions_static_epoch(
+                    &txs,
+                    epoch_id.clone(),
+                ));
+            } else if 
deactivation_epoch_id.is_none() || deactivation_epoch_id.unwrap() > *epoch_id + { + assert!(StacksBlock::validate_transactions_static_epoch( + &txs, *epoch_id, + )); + } else { + assert!(!StacksBlock::validate_transactions_static_epoch( + &txs, *epoch_id, + )); + } + } + } + #[test] fn test_block_validate_transactions_static() { let header = StacksBlockHeader { @@ -1689,6 +1804,11 @@ mod test { microblock_pubkey_hash: Hash160([9u8; 20]), }; + let stx_address = StacksAddress { + version: 0, + bytes: Hash160([0u8; 20]), + }; + let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", ) @@ -1699,6 +1819,135 @@ mod test { )) .unwrap(), ); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let order_independent_multisig_condition_p2wsh = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let order_independent_multisig_condition_p2sh = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let order_independent_sponsored_auth_p2sh = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + order_independent_multisig_condition_p2sh.clone(), + ); + + let order_independent_sponsored_auth_p2wsh = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + order_independent_multisig_condition_p2wsh.clone(), + ); + let order_independent_origin_auth_p2sh = + TransactionAuth::Standard(order_independent_multisig_condition_p2sh.clone()); + + let order_independent_origin_auth_p2wsh = + TransactionAuth::Standard(order_independent_multisig_condition_p2wsh.clone()); + + let order_independent_multisig_tx_transfer_mainnet_p2sh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_origin_auth_p2sh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_multisig_tx_transfer_mainnet_p2wsh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_origin_auth_p2wsh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_sponsored_auth_p2sh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh = StacksTransaction::new( + TransactionVersion::Mainnet, + order_independent_sponsored_auth_p2wsh.clone(), + TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([1u8; 34]), + ), + ); + + let mut tx_signer = + 
StacksTransactionSigner::new(&order_independent_multisig_tx_transfer_mainnet_p2sh); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let order_independent_multisig_tx_transfer_mainnet_p2sh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer = + StacksTransactionSigner::new(&order_independent_multisig_tx_transfer_mainnet_p2wsh); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let order_independent_multisig_tx_transfer_mainnet_p2wsh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer = StacksTransactionSigner::new( + &order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh, + ); + tx_signer.sign_origin(&privk).unwrap(); + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh_signed = + tx_signer.get_tx().unwrap(); + + let mut tx_signer = StacksTransactionSigner::new( + &order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh, + ); + tx_signer.sign_origin(&privk).unwrap(); + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + let order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh_signed = + tx_signer.get_tx().unwrap(); + let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), @@ -1810,6 +2059,12 @@ mod test { let nakamoto_coinbase = vec![tx_coinbase_proof.clone()]; let tenure_change_tx = vec![tx_tenure_change.clone()]; let nakamoto_txs = vec![tx_coinbase_proof.clone(), tx_tenure_change.clone()]; + let order_independent_multisig_txs = vec![ + order_independent_multisig_tx_transfer_mainnet_p2sh_signed.clone(), + order_independent_sponsored_multisig_tx_transfer_mainnet_p2sh_signed.clone(), + order_independent_multisig_tx_transfer_mainnet_p2wsh_signed.clone(), + order_independent_sponsored_multisig_tx_transfer_mainnet_p2wsh_signed.clone(), + ]; assert!(!StacksBlock::validate_transactions_unique(&dup_txs)); assert!(!StacksBlock::validate_transactions_network( @@ -1822,47 +2077,55 @@ mod test { )); assert!(!StacksBlock::validate_anchor_mode(&offchain_txs, true)); assert!(!StacksBlock::validate_coinbase(&no_coinbase, true)); - assert!(!StacksBlock::validate_transactions_static_epoch( - &coinbase_contract, - StacksEpochId::Epoch2_05 - )); - assert!(StacksBlock::validate_transactions_static_epoch( - &coinbase_contract, - StacksEpochId::Epoch21 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &versioned_contract, - StacksEpochId::Epoch2_05 - )); - assert!(StacksBlock::validate_transactions_static_epoch( + verify_block_epoch_validation( &versioned_contract, - StacksEpochId::Epoch21 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &nakamoto_coinbase, - StacksEpochId::Epoch21 - )); - assert!(StacksBlock::validate_transactions_static_epoch( - &nakamoto_coinbase, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch21, + header.clone(), + None, + ); + verify_block_epoch_validation( &coinbase_contract, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( - &tenure_change_tx, - StacksEpochId::Epoch21 - )); - 
assert!(StacksBlock::validate_transactions_static_epoch( - &nakamoto_txs, - StacksEpochId::Epoch30 - )); - assert!(!StacksBlock::validate_transactions_static_epoch( + None, + None, + StacksEpochId::Epoch21, + header.clone(), + Some(StacksEpochId::Epoch30), + ); + verify_block_epoch_validation( + &order_independent_multisig_txs, + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( &nakamoto_txs, - StacksEpochId::Epoch21 - )); + Some(tx_coinbase.clone()), + None, + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( + &nakamoto_coinbase, + Some(tx_coinbase.clone()), + None, + StacksEpochId::Epoch30, + header.clone(), + None, + ); + verify_block_epoch_validation( + &tenure_change_tx, + Some(tx_coinbase.clone()), + Some(tx_coinbase_proof.clone()), + StacksEpochId::Epoch30, + header.clone(), + None, + ); } // TODO: diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 23f2c920088..1a47613c89f 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, VecDeque}; use clarity::vm::analysis::arithmetic_checker::ArithmeticOnlyChecker; use clarity::vm::analysis::mem_type_check; use clarity::vm::ast::ASTRules; -use clarity::vm::clarity::TransactionConnection; +use clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::contexts::OwnedEnvironment; use clarity::vm::contracts::Contract; use clarity::vm::costs::CostOverflowingMath; @@ -84,7 +84,8 @@ lazy_static! { pub struct ClarityTestSim { marf: MarfedKV, - pub height: u64, + pub block_height: u64, + pub tenure_height: u64, fork: u64, /// This vec specifies the transitions for each epoch. 
/// It is a list of heights at which the simulated chain transitions
@@ -134,33 +135,52 @@ impl ClarityTestSim {
         ClarityTestSim {
             marf,
-            height: 0,
+            block_height: 0,
+            tenure_height: 0,
             fork: 0,
             epoch_bounds: vec![0, u64::MAX],
         }
     }
 
-    pub fn execute_next_block_as_conn<F, R>(&mut self, f: F) -> R
+    pub fn burn_block_height(&self) -> u64 {
+        self.tenure_height + 100
+    }
+
+    pub fn execute_next_block_as_conn_with_tenure<F, R>(&mut self, new_tenure: bool, f: F) -> R
     where
         F: FnOnce(&mut ClarityBlockConnection) -> R,
     {
         let r = {
             let mut store = self.marf.begin(
-                &StacksBlockId(test_sim_height_to_hash(self.height, self.fork)),
-                &StacksBlockId(test_sim_height_to_hash(self.height + 1, self.fork)),
+                &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)),
+                &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)),
             );
 
+            self.block_height += 1;
+            if new_tenure {
+                self.tenure_height += 1;
+            }
+
             let headers_db = TestSimHeadersDB {
-                height: self.height + 1,
+                height: self.block_height,
             };
             let burn_db = TestSimBurnStateDB {
                 epoch_bounds: self.epoch_bounds.clone(),
                 pox_constants: PoxConstants::test_default(),
-                height: (self.height + 100).try_into().unwrap(),
+                height: (self.tenure_height + 100).try_into().unwrap(),
             };
 
             let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db);
 
+            let mut db = store.as_clarity_db(&headers_db, &burn_db);
+            if cur_epoch.clarity_uses_tip_burn_block() {
+                db.begin();
+                db.set_tenure_height(self.tenure_height as u32)
+                    .expect("FAIL: unable to set tenure height in Clarity database");
+                db.commit()
+                    .expect("FAIL: unable to commit tenure height in Clarity database");
+            }
+
             let mut block_conn =
                 ClarityBlockConnection::new_test_conn(store, &headers_db, &burn_db, cur_epoch);
             let r = f(&mut block_conn);
@@ -169,43 +189,67 @@ impl ClarityTestSim {
             r
         };
 
-        self.height += 1;
         r
     }
 
-    pub fn execute_next_block<F, R>(&mut self, f: F) -> R
+    pub fn execute_next_block_as_conn<F, R>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut ClarityBlockConnection) -> R,
+    {
+        self.execute_next_block_as_conn_with_tenure(true, f)
+    }
+
+    pub fn execute_next_block_with_tenure<F, R>(&mut self, new_tenure: bool, f: F) -> R
     where
         F: FnOnce(&mut OwnedEnvironment) -> R,
     {
         let mut store = self.marf.begin(
-            &StacksBlockId(test_sim_height_to_hash(self.height, self.fork)),
-            &StacksBlockId(test_sim_height_to_hash(self.height + 1, self.fork)),
+            &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)),
+            &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)),
         );
 
+        self.block_height += 1;
+        if new_tenure {
+            self.tenure_height += 1;
+        }
+
        let r = {
             let headers_db = TestSimHeadersDB {
-                height: self.height + 1,
+                height: self.block_height,
             };
             let burn_db = TestSimBurnStateDB {
                 epoch_bounds: self.epoch_bounds.clone(),
                 pox_constants: PoxConstants::test_default(),
-                height: (self.height + 100).try_into().unwrap(),
+                height: (self.tenure_height + 100).try_into().unwrap(),
             };
 
             let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db);
             debug!("Execute block in epoch {}", &cur_epoch);
 
-            let db = store.as_clarity_db(&headers_db, &burn_db);
+            let mut db = store.as_clarity_db(&headers_db, &burn_db);
+            if cur_epoch.clarity_uses_tip_burn_block() {
+                db.begin();
+                db.set_tenure_height(self.tenure_height as u32)
+                    .expect("FAIL: unable to set tenure height in Clarity database");
+                db.commit()
+                    .expect("FAIL: unable to commit tenure height in Clarity database");
+            }
             let mut owned_env = OwnedEnvironment::new_toplevel(db);
 
             f(&mut owned_env)
         };
 store.test_commit();
-        self.height += 1;
         r
     }
 
+    pub fn execute_next_block<F, R>(&mut self, f: F) -> R
+    where
+        F: FnOnce(&mut OwnedEnvironment) -> R,
+    {
+        self.execute_next_block_with_tenure(true, f)
+    }
+
     fn check_and_bump_epoch(
         store: &mut WritableMarfStore,
         headers_db: &TestSimHeadersDB,
@@ -253,7 +297,8 @@ impl ClarityTestSim {
         };
 
         store.test_commit();
-        self.height = parent_height + 1;
+        self.block_height = parent_height + 1;
+        self.tenure_height = parent_height + 1;
         self.fork += 1;
 
         r
@@ -308,6 +353,14 @@ fn cost_2_contract_is_arithmetic_only() {
 }
 
 impl BurnStateDB for TestSimBurnStateDB {
+    fn get_tip_burn_block_height(&self) -> Option<u32> {
+        Some(self.height as u32)
+    }
+
+    fn get_tip_sortition_id(&self) -> Option<SortitionId> {
+        panic!("Not implemented in TestSim");
+    }
+
     fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option<u32> {
         panic!("Not implemented in TestSim");
     }
@@ -370,7 +423,10 @@ impl BurnStateDB for TestSimBurnStateDB {
             2 => StacksEpochId::Epoch21,
             3 => StacksEpochId::Epoch22,
             4 => StacksEpochId::Epoch23,
-            _ => panic!("Epoch unknown"),
+            5 => StacksEpochId::Epoch24,
+            6 => StacksEpochId::Epoch25,
+            7 => StacksEpochId::Epoch30,
+            _ => panic!("Invalid epoch index"),
         };
 
         Some(StacksEpoch {
@@ -485,11 +541,19 @@ impl HeadersDB for TestSimHeadersDB {
         }
     }
 
-    fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option<VRFSeed> {
+    fn get_vrf_seed_for_block(
+        &self,
+        _bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<VRFSeed> {
         None
     }
 
-    fn get_consensus_hash_for_block(&self, bhh: &StacksBlockId) -> Option<ConsensusHash> {
+    fn get_consensus_hash_for_block(
+        &self,
+        bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<ConsensusHash> {
         // capture the first 20 bytes of the block ID, which in this case captures the height and
         // fork ID.
         let mut bytes_20 = [0u8; 20];
@@ -500,6 +564,7 @@
     fn get_stacks_block_header_hash_for_block(
         &self,
         id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
     ) -> Option<BlockHeaderHash> {
         if *id_bhh == *FIRST_INDEX_BLOCK_HASH {
             Some(FIRST_STACKS_BLOCK_HASH)
@@ -511,7 +576,11 @@
         }
     }
 
-    fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option<u64> {
+    fn get_burn_block_time_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: Option<&StacksEpochId>,
+    ) -> Option<u64> {
         if *id_bhh == *FIRST_INDEX_BLOCK_HASH {
             Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64)
         } else {
@@ -523,6 +592,11 @@
         }
     }
 
+    fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option<u64> {
+        let block_height = test_sim_hash_to_height(&id_bhh.0)?;
+        Some(1713799973 + block_height)
+    }
+
     fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option<u32> {
         if *id_bhh == *FIRST_INDEX_BLOCK_HASH {
             Some(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32)
@@ -541,21 +615,37 @@
         }
     }
 
-    fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option<StacksAddress> {
+    fn get_miner_address(
+        &self,
+        _id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<StacksAddress> {
         Some(MINER_ADDR.clone())
     }
 
-    fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_burnchain_tokens_spent_for_block(
+        &self,
+        id_bhh: &StacksBlockId,
+        _epoch: &StacksEpochId,
+    ) -> Option<u128> {
         // if the block is defined at all, then return a constant
         self.get_burn_block_height_for_block(id_bhh).map(|_| 2000)
     }
 
-    fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option<u128> {
+    fn get_burnchain_tokens_spent_for_winning_block(
+        &self,
+        id_bhh: &StacksBlockId, 
_epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 1000) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } @@ -2703,7 +2793,7 @@ fn test_vote_fail() { ); }); - let fork_start = sim.height; + let fork_start = sim.block_height; for i in 0..25 { sim.execute_next_block(|env| { diff --git a/stackslib/src/chainstate/stacks/boot/docs.rs b/stackslib/src/chainstate/stacks/boot/docs.rs index 62580f384a1..28066abc712 100644 --- a/stackslib/src/chainstate/stacks/boot/docs.rs +++ b/stackslib/src/chainstate/stacks/boot/docs.rs @@ -1,4 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . use clarity::vm::docs::contracts::{produce_docs_refs, ContractSupportDocs}; +use clarity::vm::ClarityVersion; use hashbrown::{HashMap, HashSet}; use super::STACKS_BOOT_CODE_MAINNET; @@ -139,7 +155,11 @@ If your name is in a namespace where names do not expire, then you never need to pub fn make_json_boot_contracts_reference() -> String { let contract_supporting_docs = make_contract_support_docs(); - let api_out = produce_docs_refs(&*STACKS_BOOT_CODE_MAINNET, &contract_supporting_docs); + let api_out = produce_docs_refs( + &*STACKS_BOOT_CODE_MAINNET, + &contract_supporting_docs, + ClarityVersion::Clarity1, + ); format!( "{}", serde_json::to_string(&api_out).expect("Failed to serialize documentation") diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 539654d30a4..0f45d7a6d02 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -40,7 +40,9 @@ use serde::Deserialize; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; use stacks_common::types; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, +}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use wsts::curve::point::{Compressed, Point}; use wsts::curve::scalar::Scalar; @@ -49,7 +51,7 @@ use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::{Address, Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::db::{StacksChainState, StacksDBConn}; use crate::chainstate::stacks::index::marf::MarfConnection; 
use crate::chainstate::stacks::Error; use crate::clarity_vm::clarity::{ClarityConnection, ClarityTransactionConnection}; @@ -119,7 +121,7 @@ lazy_static! { format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); pub static ref POX_3_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); - pub static ref POX_4_CODE: String = format!("{}", POX_4_BODY); + pub static ref POX_4_CODE: String = POX_4_BODY.to_string(); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), @@ -275,6 +277,23 @@ impl RewardSet { pub fn metadata_deserialize(from: &str) -> Result<RewardSet, String> { serde_json::from_str(from).map_err(|e| e.to_string()) } + + /// Return the total `weight` of all signers in the reward set. + /// If there are no reward set signers, an error is returned. + pub fn total_signing_weight(&self) -> Result<u32, String> { + let Some(ref reward_set_signers) = self.signers else { + return Err(format!( + "Unable to calculate total weight - No signers in reward set" + )); + }; + Ok(reward_set_signers + .iter() + .map(|s| s.weight) + .fold(0, |s, acc| { + acc.checked_add(s) + .expect("FATAL: Total signer weight > u32::MAX") + })) + } } impl RewardSetData { @@ -423,6 +442,8 @@ impl StacksChainState { result } + // TODO: add tests from mutation testing results #4854 + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -435,6 +456,8 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox2) } + // TODO: add tests from mutation testing results #4854 + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -447,6 +470,8 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox3) } + // TODO: add tests from mutation testing results #4854 + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -460,6 +485,8 @@ impl StacksChainState { Ok(vec![]) } + // TODO: add tests from mutation testing results #4854 + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified.
/// @@ -569,12 +596,13 @@ impl StacksChainState { boot_contract_name: &str, code: &str, ) -> Result<Value, Error> { - let iconn = sortdb.index_conn(); - let dbconn = self.state_index.sqlite_conn(); + let iconn = sortdb.index_handle_at_block(self, stacks_block_id)?; + let ro_index = self.state_index.reopen_readonly()?; + let headers_db = HeadersDBConn(StacksDBConn::new(&ro_index, ())); self.clarity_state .eval_read_only( &stacks_block_id, - &HeadersDBConn(dbconn), + &headers_db, &iconn, &boot::boot_code_id(boot_contract_name, self.mainnet), code, @@ -631,24 +659,28 @@ impl StacksChainState { let cost_track = LimitedCostTracker::new_free(); let sender = PrincipalData::Standard(StandardPrincipalData::transient()); let result = self - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity1, - sender, - None, - cost_track, - |env| { - env.execute_contract( - &contract_identifier, - function, - &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], - true, - ) - }, - ) - })? + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(self, tip)?, + tip, + |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity1, + sender, + None, + cost_track, + |env| { + env.execute_contract( + &contract_identifier, + function, + &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], + true, + ) + }, + ) + }, + )? .ok_or_else(|| Error::NoSuchBlockError)?? .expect_u128() .expect("FATAL: unexpected PoX structure"); @@ -752,6 +784,8 @@ impl StacksChainState { Some(signer_set) } + // TODO: add tests from mutation testing results #4855 + #[cfg_attr(test, mutants::skip)] /// Given a threshold and set of registered addresses, return a reward set where /// every entry address has stacked more than the threshold, and addresses /// are repeated floor(stacked_amt / threshold) times.
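// An illustrative sketch of the slot-repetition rule documented above (a
// hypothetical standalone helper, assuming u128 microSTX amounts; it is not
// code from this patch): an address stacking `stacked_amt` against `threshold`
// receives floor(stacked_amt / threshold) reward-set entries.
fn reward_slots(stacked_amt: u128, threshold: u128) -> u128 {
    // unsigned integer division in Rust already floors, matching the doc comment
    stacked_amt / threshold
}

#[test]
fn reward_slots_examples() {
    assert_eq!(reward_slots(200_000, 100_000), 2); // exactly 2x the threshold -> 2 entries
    assert_eq!(reward_slots(199_999, 100_000), 1); // just under 2x -> 1 entry
    assert_eq!(reward_slots(99_999, 100_000), 0); // below the threshold -> no entries
}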
@@ -1355,8 +1389,6 @@ pub mod pox_3_tests; pub mod pox_4_tests; #[cfg(test)] pub mod signers_tests; -#[cfg(test)] -pub mod signers_voting_tests; #[cfg(test)] pub mod test { @@ -1371,6 +1403,7 @@ pub mod test { use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::*; + use self::signers_tests::readonly_call; use super::*; use crate::burnchains::{Address, PublicKey}; use crate::chainstate::burn::db::sortdb::*; @@ -1667,7 +1700,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1695,7 +1728,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1728,6 +1761,58 @@ pub mod test { } } + pub fn get_stacker_info_pox_4( + peer: &mut TestPeer, + addr: &PrincipalData, + ) -> Option<(PoxAddress, u128, u128, Vec<u128>)> { + let value_opt = eval_at_tip( + peer, + "pox-4", + &format!("(get-stacker-info '{})", addr.to_string()), + ); + let data = if let Some(d) = value_opt.expect_optional().unwrap() { + d + } else { + return None; + }; + + let data = data.expect_tuple().unwrap(); + let pox_addr = tuple_to_pox_addr( + data.get("pox-addr") + .unwrap() + .to_owned() + .expect_tuple() + .unwrap(), + ); + let first_reward_cycle = data + .get("first-reward-cycle") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + let lock_period = data + .get("lock-period") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + let reward_set_indices = data + .get("reward-set-indexes") + .unwrap() + .to_owned() + .expect_list() + .unwrap() + .iter() + .map(|v| v.to_owned().expect_u128().unwrap()) + .collect(); + Some(( + pox_addr, + first_reward_cycle, + lock_period, + reward_set_indices, + )) + } + pub fn get_stacker_info( peer: &mut TestPeer, addr: &PrincipalData, @@ -1789,9 +1874,13 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb + .index_handle_at_block(&chainstate, &stacks_block_id) + .unwrap(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap() }); account @@ -1803,9 +1892,13 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_contract(clarity_tx, addr).unwrap() - }) + .with_read_only_clarity_tx( + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .unwrap(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_contract(clarity_tx, addr).unwrap(), + ) .unwrap() }); contract_opt @@ -1987,6 +2080,27 @@ pub mod test { make_tx(key, nonce, 1, payload) } + pub fn get_approved_aggregate_key( + peer:
&mut TestPeer<'_>, + latest_block_id: StacksBlockId, + reward_cycle: u128, + ) -> Option<Point> { + let key_opt = readonly_call( + peer, + &latest_block_id, + SIGNERS_VOTING_NAME.into(), + "get-approved-aggregate-key".into(), + vec![Value::UInt(reward_cycle)], + ) + .expect_optional() + .unwrap(); + key_opt.map(|key_value| { + let data = key_value.expect_buff(33).unwrap(); + let compressed_data = Compressed::try_from(data.as_slice()).unwrap(); + Point::try_from(&compressed_data).unwrap() + }) + } + pub fn make_pox_2_increase( key: &StacksPrivateKey, nonce: u64, @@ -2725,7 +2839,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -2852,7 +2966,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -2938,7 +3052,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -3044,7 +3158,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3155,7 +3269,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3373,7 +3487,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3631,7 +3745,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3906,7 +4020,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4150,7 +4264,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -4323,7 +4437,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4622,7 +4736,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5203,7 +5317,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), +
&sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5572,7 +5686,7 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); if tenure_id == 2 { // block should be all the transactions diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 2c47f0ec0bd..7ae25d00f6f 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -53,7 +53,7 @@ use crate::chainstate::stacks::boot::{ POX_3_NAME, }; use crate::chainstate::stacks::db::{ - MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, + MinerPaymentSchedule, StacksChainState, StacksDBConn, StacksHeaderInfo, MINER_REWARD_MATURITY, }; use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::index::marf::MarfConnection; @@ -666,8 +666,8 @@ where F: FnOnce(&mut ClarityDatabase) -> R, { with_sortdb(peer, |ref mut c, ref sortdb| { - let headers_db = HeadersDBConn(c.state_index.sqlite_conn()); - let burn_db = sortdb.index_conn(); + let headers_db = HeadersDBConn(StacksDBConn::new(&c.state_index, ())); + let burn_db = sortdb.index_handle_at_tip(); let mut read_only_clar = c .clarity_state .read_only_connection(tip, &headers_db, &burn_db); @@ -3794,7 +3794,7 @@ fn test_get_pox_addrs() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3896,7 +3896,7 @@ fn test_get_pox_addrs() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx @@ -4091,7 +4091,7 @@ fn test_stack_with_segwit() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4193,7 +4193,7 @@ fn test_stack_with_segwit() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index f0c7a9ef75f..3134b4773a7 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -3348,24 +3348,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only(
&boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 1be2bb5ba32..ac59772f320 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -26,6 +26,7 @@ use clarity::vm::errors::{ }; use clarity::vm::eval; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; +use clarity::vm::functions::principals; use clarity::vm::representations::SymbolicExpression; use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use clarity::vm::types::Value::Response; @@ -53,12 +54,16 @@ use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::tests::pox_addr_from; use crate::chainstate::nakamoto::test_signers::TestSigners; +use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::pox_2_tests::{ check_pox_print_event, generate_pox_clarity_value, get_partial_stacked, get_reward_cycle_total, get_reward_set_entries_at, get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, PoxPrintFields, StackingStateCheckData, }; +use crate::chainstate::stacks::boot::signers_tests::{ + get_signer_index, prepare_signers_test, readonly_call, +}; use crate::chainstate::stacks::boot::{ PoxVersions, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, MINERS_NAME, POX_2_NAME, POX_3_NAME, @@ -827,24 +832,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only( - &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info?
pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) @@ -2939,7 +2948,7 @@ fn verify_signer_key_sig( ) -> Value { let result: Value = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &latest_block, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &latest_block, |clarity_tx| { clarity_tx .with_readonly_clarity_env( false, @@ -4248,7 +4257,7 @@ fn stack_agg_increase() { let default_initial_balances = 1_000_000_000_000_000_000; let observer = TestEventObserver::new(); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let mut initial_balances = vec![ (alice.principal.clone(), default_initial_balances), (bob.principal.clone(), default_initial_balances), @@ -4492,7 +4501,7 @@ fn stack_agg_increase() { .clone() .expect_u128() .unwrap(), - Some(alice_signature_increase), + Some(alice_signature_increase.clone()), &alice.public_key, u128::MAX, 1, @@ -4590,16 +4599,52 @@ fn stack_agg_increase() { &bob_err_increase_result_expected ); + let bob_aggregate_increase_tx = &tx_block.receipts.get(4).unwrap(); + // Fetch the aggregate increase result & check that value is true - let bob_aggregate_increase_result = &tx_block - .receipts - .get(4) - .unwrap() + let bob_aggregate_increase_result = bob_aggregate_increase_tx .result .clone() .expect_result_ok() .unwrap(); - assert_eq!(bob_aggregate_increase_result, &Value::Bool(true)); + assert_eq!(bob_aggregate_increase_result, Value::Bool(true)); + + let aggregation_increase_event = &bob_aggregate_increase_tx.events[0]; + + let expected_result = Value::okay(Value::Tuple( + TupleData::from_data(vec![ + ( + "stacker".into(), + Value::Principal(PrincipalData::from(bob.address.clone())), + ), + ("total-locked".into(), Value::UInt(min_ustx * 2)), + ]) + .unwrap(), + )) + .unwrap(); + + let increase_op_data = HashMap::from([ + ( + "signer-sig", + Value::some(Value::buff_from(alice_signature_increase).unwrap()).unwrap(), + ), + ( + "signer-key", + Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(), + ), + ("max-amount", Value::UInt(u128::MAX)), + ("auth-id", Value::UInt(1)), + ]); + + let common_data = PoxPrintFields { + op_name: "stack-aggregation-increase".to_string(), + stacker: Value::Principal(PrincipalData::from(bob.address.clone())), + balance: Value::UInt(1000000000000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + + check_pox_print_event(&aggregation_increase_event, common_data, increase_op_data); // Check that Bob's second pool has an assigned reward index of 1 let bob_aggregate_commit_reward_index = &tx_block @@ -6407,98 +6452,1705 @@ fn delegate_stack_increase() { assert_eq!(&reward_entry.signer.unwrap(), signer_pk_bytes.as_slice()); } -// In this test case, Alice delegates twice the stacking minimum to Bob. -// Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. -// This should return a clarity user error. 
-#[test] -fn delegate_stack_increase_err() { - let lock_period: u128 = 2; - let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); +pub fn pox_4_scenario_test_setup<'a>( + test_name: &str, + observer: &'a TestEventObserver, + initial_balances: Vec<(PrincipalData, u64)>, +) -> ( + TestPeer<'a>, + usize, + u64, + u128, + u128, + u128, + u128, + TestPeerConfig, +) { + // Shared setup code extracted from the original tests + let test_signers = TestSigners::new(vec![]); + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let mut peer_config = TestPeerConfig::new(function_name!(), 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); - let alice_nonce = 0; - let alice_key = &keys[0]; - let alice_address = PrincipalData::from(key_to_stacks_addr(alice_key)); - let mut bob_nonce = 0; - let bob_delegate_key = &keys[1]; - let bob_delegate_address = PrincipalData::from(key_to_stacks_addr(bob_delegate_key)); - let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let signer_sk = StacksPrivateKey::from_seed(&[1, 3, 3, 7]); - let signer_pk = StacksPublicKey::from_private(&signer_sk); - let signer_pk_bytes = signer_pk.to_bytes_compressed(); - let signer_key_val = Value::buff_from(signer_pk_bytes.clone()).unwrap(); + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + peer_config + .stacker_dbs + .push(boot_code_id(MINERS_NAME, false)); + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config + .initial_balances + .append(&mut initial_balances.clone()); + peer_config.burnchain.pox_constants.v2_unlock_height = 81; + peer_config.burnchain.pox_constants.pox_3_activation_height = 101; + peer_config.burnchain.pox_constants.v3_unlock_height = 102; + peer_config.burnchain.pox_constants.pox_4_activation_height = 105; + peer_config.test_signers = Some(test_signers.clone()); + peer_config.burnchain.pox_constants.reward_cycle_length = 20; + peer_config.burnchain.pox_constants.prepare_length = 5; - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - key_to_stacks_addr(bob_delegate_key).bytes, - ); + let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(&observer)); - let next_reward_cycle = 1 + burnchain - .block_height_to_reward_cycle(block_height) - .unwrap(); + let mut peer_nonce = 0; - let delegate_stx = make_pox_4_delegate_stx( - alice_key, - alice_nonce, - 2 * min_ustx, - bob_delegate_address.clone(), - None, - Some(pox_addr.clone()), - ); + let reward_cycle_len = peer.config.burnchain.pox_constants.reward_cycle_length; + let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; - let alice_principal = PrincipalData::from(key_to_stacks_addr(alice_key)); + let target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let mut latest_block = None; - let delegate_stack_stx = make_pox_4_delegate_stack_stx( - bob_delegate_key, - bob_nonce, - alice_principal, - min_ustx * 2, - pox_addr.clone(), - block_height as u128, - lock_period, - ); + while peer.get_burn_block_height() < u64::from(target_height) { + latest_block = Some(peer.tenure_with_txs(&[],
&mut peer_nonce)); + observer.get_blocks(); + } + let latest_block = latest_block.expect("Failed to get tip"); - let txs = vec![delegate_stx, delegate_stack_stx]; + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let next_reward_cycle = reward_cycle.wrapping_add(1); + let burn_block_height = peer.get_burn_block_height(); + let current_block_height = peer.config.current_block; + let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + ( + peer, + peer_nonce, + burn_block_height, + target_height as u128, + reward_cycle as u128, + next_reward_cycle as u128, + min_ustx as u128, + peer_config.clone(), + ) +} - bob_nonce += 1; +// In this test two solo stacker-signers Alice & Bob sign & stack +// for two reward cycles. Alice provides a signature, Bob uses +// 'set-signer-key-authorizations' to authorize. Two cycles later, +// when no longer stacked, they both try replaying their auths. +#[test] +fn test_scenario_one() { + // Alice solo stacker-signer setup + let mut alice = StackerSignerInfo::new(); + // Bob solo stacker-signer setup + let mut bob = StackerSignerInfo::new(); + let default_initial_balances: u64 = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + ]; - let signature = make_signer_key_signature( - &pox_addr, - &signer_sk, - next_reward_cycle.into(), - &Pox4SignatureTopic::AggregationIncrease, - 1_u128, + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_one", &observer, initial_balances); + + // Alice Signatures + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 1; + let alice_signature = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, u128::MAX, 1, ); + let alice_signature_err = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle - 1, + &Pox4SignatureTopic::StackStx, + lock_period, + 100, + 2, + ); - // Bob's Aggregate Increase - let bobs_aggregate_increase = make_pox_4_aggregation_increase( - &bob_delegate_key, - bob_nonce, - &pox_addr, - next_reward_cycle.into(), - 0, - Some(signature), - &signer_pk, + // Bob Authorizations + let bob_authorization_low = make_pox_4_set_signer_key_auth( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), + 100, + 2, + ); + bob.nonce += 1; + let bob_authorization = make_pox_4_set_signer_key_auth( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), u128::MAX, + 3, + ); + bob.nonce += 1; + + // Alice stacks + let alice_err_nonce = alice.nonce; + let alice_stack_err = make_pox_4_lockup( + &alice.private_key, + alice_err_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_err), + 100, 1, ); - let txs = vec![bobs_aggregate_increase]; + let alice_stack_nonce = alice_err_nonce + 1; + let alice_stack = make_pox_4_lockup( + &alice.private_key, + alice_stack_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + 
burn_block_height, + Some(alice_signature.clone()), + u128::MAX, + 1, + ); + alice.nonce = alice_stack_nonce + 1; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + // Bob stacks + let bob_nonce_stack_err = bob.nonce; + let bob_stack_err = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack_err, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + 100, + 2, + ); + let bob_nonce_stack = bob_nonce_stack_err + 1; + let bob_stack = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 3, + ); + bob.nonce = bob_nonce_stack + 1; - let delegate_transactions = - get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); + let txs = vec![ + bob_authorization_low, + bob_authorization, + alice_stack_err, + alice_stack, + bob_stack_err, + bob_stack, + ]; - let actual_result = delegate_transactions.first().cloned().unwrap().result; + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Alice stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &alice.principal) + .expect("Failed to find alice initial stack-stx"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, alice.pox_address); + + // Verify Bob stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &bob.principal) + .expect("Failed to find bob initial stack-stx"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, bob.pox_address); + + // 1. Check bob's low authorization transaction + let bob_tx_result_low = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_tx_result_low, Value::Bool(true)); - // Should be a DELEGATION NO REWARD SLOT error - let expected_result = Value::error(Value::Int(28)).unwrap(); + // 2. Check bob's expected authorization transaction + let bob_tx_result_ok = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_tx_result_ok, Value::Bool(true)); + + // 3. 
Check alice's low stack transaction + let alice_tx_result_err = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_tx_result_err, Value::Int(38)); + + // Get alice's expected stack transaction + let alice_tx_result_ok = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // 4.1 Check amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = alice_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // 4.2 Check signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = alice_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 4.3 Check unlock height + let unlock_height_expected = Value::UInt( + peer.config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) + .wrapping_sub(1) as u128, + ); + let unlock_height_actual = alice_tx_result_ok + .data_map + .get("unlock-burn-height") + .unwrap() + .clone(); + assert_eq!(unlock_height_expected, unlock_height_actual); + + // 5. Check bob's error stack transaction + let bob_tx_result_err = tx_block + .receipts + .get(5) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_tx_result_err, Value::Int(38)); + + // Get bob's expected stack transaction + let bob_tx_result_ok = tx_block + .receipts + .get(6) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // 6.1 Check amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = bob_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // 6.2 Check signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = bob_tx_result_ok.data_map.get("signer-key").unwrap().clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 6.3 Check unlock height (end of cycle 7 - block 140) + let unlock_height_expected = Value::UInt( + peer.config + .burnchain + .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) + .wrapping_sub(1) as u128, + ); + let unlock_height_actual = bob_tx_result_ok + .data_map + .get("unlock-burn-height") + .unwrap() + .clone(); + assert_eq!(unlock_height_expected, unlock_height_actual); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice vote + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob vote + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![alice_vote, bob_vote]; + + let target_reward_cycle = 8; + // Commit vote txs & advance to the first burn block 
of reward cycle 8 (block 161) + let mut target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(target_reward_cycle as u64); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + + // Start replay transactions + // Alice stacks with a replayed signature + let alice_replay_nonce = alice.nonce; + let alice_stack_replay = make_pox_4_lockup( + &alice.private_key, + alice_replay_nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + 161, + Some(alice_signature.clone()), + u128::MAX, + 1, + ); + // Bob stacks with a replayed authorization + let bob_nonce_stack_replay = bob.nonce; + let bob_stack_replay = make_pox_4_lockup( + &bob.private_key, + bob_nonce_stack_replay, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + 161, + None, + u128::MAX, + 3, + ); + let txs = vec![alice_stack_replay, bob_stack_replay]; + + // Commit replay txs & advance to the second burn block of reward cycle 8 (block 162) + target_height += 1; + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice replay, expect (err 35) - ERR_INVALID_SIGNATURE_PUBKEY + let alice_replay_result = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_replay_result, Value::Int(35)); + + // Check Bob replay, expect (err 19) - ERR_SIGNER_AUTH_USED + let bob_tx_result = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_tx_result, Value::Int(19)); +} + +// In this test two solo service signers, Alice & Bob, provide auth +// for Carl & Dave, solo stackers. Alice provides a signature for Carl, +// Bob uses 'set-signer-key...' for Dave. 
+#[test] +fn test_scenario_two() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + // Carl solo stacker setup + let mut carl = StackerSignerInfo::new(); + // Dave solo stacker setup + let mut dave = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (dave.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_two", &observer, initial_balances); + + // Alice Signature For Carl + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 1; + let alice_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Bob Authorization For Dave + let bob_authorization_for_dave = make_pox_4_set_signer_key_auth( + &dave.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + true, + bob.nonce, + Some(&bob.private_key), + u128::MAX, + 1, + ); + bob.nonce += 1; + + // Carl Stacks w/ Alice's Signature - Malformed (lock period) + let carl_stack_err = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + lock_period + 1, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_carl.clone()), + u128::MAX, + 1, + ); + carl.nonce += 1; + + // Carl Stacks w/ Alice's Signature + let carl_stack = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_carl.clone()), + u128::MAX, + 1, + ); + carl.nonce += 1; + + // Dave Stacks w/ Bob's Authorization - Malformed (pox) + let dave_stack_err = make_pox_4_lockup( + &dave.private_key, + dave.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 1, + ); + dave.nonce += 1; + + // Dave Stacks w/ Bob's Authorization + let dave_stack = make_pox_4_lockup( + &dave.private_key, + dave.nonce, + amount, + &dave.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + None, + u128::MAX, + 1, + ); + dave.nonce += 1; + + let txs = vec![ + bob_authorization_for_dave, + carl_stack_err, + carl_stack, + dave_stack_err, + dave_stack, + ]; + + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Carl Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, carl.pox_address); + + // Verify Dave Stacked + let (pox_address, first_reward_cycle,
lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &dave.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, dave.pox_address); + + // Check Carl's malformed signature stack transaction (err 35 - INVALID_SIGNATURE_PUBKEY) + let carl_tx_result_err = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(carl_tx_result_err, Value::Int(35)); + + // Check Carl's expected stack transaction + let carl_tx_result_ok = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Carl amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = carl_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Carl signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = carl_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // Check Dave's malformed pox stack transaction (err 19 - INVALID_SIGNER_AUTH) + let dave_tx_result_err = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(dave_tx_result_err, Value::Int(19)); + + // Check Dave's expected stack transaction + let dave_tx_result_ok = tx_block + .receipts + .get(5) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Dave amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = dave_tx_result_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Dave signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = dave_tx_result_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice expected vote + let alice_vote_expected = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Alice duplicate vote + let alice_vote_duplicate = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob vote err (err 17 - INVALID_ROUND) + let bob_vote_err = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 3, + next_reward_cycle, + ); + bob.nonce += 1; + // Bob expected vote + let bob_vote_expected = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![ + alice_vote_expected, + alice_vote_duplicate, + 
bob_vote_err, + bob_vote_expected, + ]; + + let target_reward_cycle = 8; + // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(target_reward_cycle as u64); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice's expected vote + let alice_expected_vote = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(alice_expected_vote, Value::Bool(true)); + + // Check Alice's duplicate vote (err 15 - DUPLICATE_ROUND) + let alice_duplicate_vote = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_duplicate_vote, Value::UInt(15)); + + // Check Bob's round err vote (err 17 - INVALID_ROUND) + let bob_round_err_vote = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_round_err_vote, Value::UInt(17)); + + // Check Bob's expected vote + let bob_expected_vote = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_expected_vote, Value::Bool(true)); +} + +// In this scenario, two solo stacker-signers (Alice, Bob), one service signer (Carl), +// one stacking pool operator (Dave), & three pool stackers (Eve, Frank, Grace). +#[test] +fn test_scenario_three() { + // Alice stacker signer setup + let mut alice = StackerSignerInfo::new(); + // Bob stacker signer setup + let mut bob = StackerSignerInfo::new(); + // Carl service signer setup + let carl = StackerSignerInfo::new(); + // David stacking pool operator setup + let mut david = StackerSignerInfo::new(); + // Eve pool stacker setup + let mut eve = StackerSignerInfo::new(); + // Frank pool stacker setup + let mut frank = StackerSignerInfo::new(); + // Grace pool stacker setup + let mut grace = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (david.principal.clone(), default_initial_balances), + (eve.principal.clone(), default_initial_balances), + (frank.principal.clone(), default_initial_balances), + (grace.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_three", &observer, initial_balances); + + let lock_period = 2; + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let alice_signature_for_alice_err = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + 13, + u128::MAX, + 1, + ); + let alice_signature_for_alice_expected = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let bob_signature_for_bob_err = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + reward_cycle - 1, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let bob_signature_for_bob_expected = make_signer_key_signature( + &bob.pox_address, + 
&bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + let carl_signature_for_david_err = make_signer_key_signature( + &david.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + 1, + u128::MAX, + 1, + ); + let carl_signature_for_david = make_signer_key_signature( + &david.pox_address, + &carl.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + // Alice solo stack, error + let alice_stack_tx_err = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_alice_err.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Alice solo stack + let alice_stack_tx_expected = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_for_alice_expected), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Bob solo stack, error + let bob_stack_tx_err = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_for_bob_err.clone()), + u128::MAX, + 1, + ); + bob.nonce += 1; + // Bob solo stack + let bob_stack_tx_expected = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_for_bob_expected), + u128::MAX, + 1, + ); + bob.nonce += 1; + // Eve pool stacker delegating STX to David + let eve_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &eve.private_key, + eve.nonce, + amount, + david.principal.clone(), + Some( + peer.config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .into(), + ), + Some(david.pox_address.clone()), + ); + eve.nonce += 1; + // Frank pool stacker delegating STX to David + let frank_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &frank.private_key, + frank.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + frank.nonce += 1; + // Grace pool stacker delegating STX to David + let grace_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &grace.private_key, + grace.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + grace.nonce += 1; + // Alice error delegating while stacked + let alice_delegate_stx_to_david_err = make_pox_4_delegate_stx( + &alice.private_key, + alice.nonce, + amount, + david.principal.clone(), + None, + Some(david.pox_address.clone()), + ); + // Collecting all the pool stackers + let davids_stackers = &[ + (eve.clone(), lock_period), + (frank.clone(), lock_period), + (grace.clone(), lock_period), + (alice.clone(), lock_period), + ]; + let davids_delegate_stack_stx_txs: Vec<_> = davids_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &david.private_key, + david.nonce, + stacker.principal.clone(), + amount, + david.pox_address.clone(), + burn_block_height as u128, + *lock_period, + ); + david.nonce += 1; + tx + }) + .collect(); + // Aggregate commit david's pool stackers, error by committing for two cycles + let davids_aggregate_commit_index_tx_err_cycles = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle.wrapping_add(1), + Some(carl_signature_for_david.clone()), + &carl.public_key, 
+ u128::MAX, + 1, + ); + david.nonce += 1; + // Aggregate commit david's pool stackers, error by using the wrong signature topic + let davids_aggregate_commit_index_tx_err_signature = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(carl_signature_for_david_err.clone()), + &carl.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + // Aggregate commit david's pool stackers correctly + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(carl_signature_for_david.clone()), + &carl.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + + let mut txs = vec![ + alice_stack_tx_err, + alice_stack_tx_expected, + bob_stack_tx_err, + bob_stack_tx_expected, + eve_delegate_stx_to_david_tx, + frank_delegate_stx_to_david_tx, + grace_delegate_stx_to_david_tx, + alice_delegate_stx_to_david_err, + ]; + txs.extend(davids_delegate_stack_stx_txs); + txs.extend(vec![ + davids_aggregate_commit_index_tx_err_cycles, + davids_aggregate_commit_index_tx_err_signature, + davids_aggregate_commit_index_tx, + ]); + + // Commit txs in next block & advance to reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Start of test checks + // 1. Check that Alice can't stack with a lock_period different from the one in her signature + let alice_stack_tx_err = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_stack_tx_err, Value::Int(35)); + + // 2. Check that Alice can solo stack-sign + let alice_stack_tx_ok = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Alice amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = alice_stack_tx_ok + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Alice signer key + let signer_key_expected = Value::buff_from(alice.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = alice_stack_tx_ok + .data_map + .get("signer-key") + .unwrap() + .clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 3. Check that Bob can't stack with a signature that points to a reward cycle in the past + let bob_stack_tx_err = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(bob_stack_tx_err, Value::Int(35)); + + // 4.
Check that Bob can solo stack-sign + let bob_stack_tx_ok = tx_block + .receipts + .get(4) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Bob amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = bob_stack_tx_ok.data_map.get("lock-amount").unwrap().clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Bob signer key + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); + let signer_key_actual = bob_stack_tx_ok.data_map.get("signer-key").unwrap().clone(); + assert_eq!(signer_key_expected, signer_key_actual); + + // 5. Check that David can't delegate-stack-stx Eve if delegation expires during lock period + let eve_delegate_stx_to_david_err = tx_block + .receipts + .get(9) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(eve_delegate_stx_to_david_err, Value::Int(21)); + + // 6. Check that Frank is correctly delegated to David + let frank_delegate_stx_to_david_tx = tx_block + .receipts + .get(10) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Frank amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = frank_delegate_stx_to_david_tx + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Frank stacker address + let stacker_expected = Value::Principal(frank.address.clone().into()); + let stacker_actual = frank_delegate_stx_to_david_tx + .data_map + .get("stacker") + .unwrap() + .clone(); + assert_eq!(stacker_expected, stacker_actual); + + // 7. Check that Grace is correctly delegated to David + let grace_delegate_stx_to_david_tx = tx_block + .receipts + .get(11) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap() + .expect_tuple() + .unwrap(); + + // Check Grace amount locked + let amount_locked_expected = Value::UInt(amount); + let amount_locked_actual = grace_delegate_stx_to_david_tx + .data_map + .get("lock-amount") + .unwrap() + .clone(); + assert_eq!(amount_locked_actual, amount_locked_expected); + + // Check Grace stacker address + let stacker_expected = Value::Principal(grace.address.clone().into()); + let stacker_actual = grace_delegate_stx_to_david_tx + .data_map + .get("stacker") + .unwrap() + .clone(); + assert_eq!(stacker_expected, stacker_actual); + + // 8. Check that Alice can't delegate-stack if already stacking + let alice_delegate_stx_to_david_err = tx_block + .receipts + .get(12) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_delegate_stx_to_david_err, Value::Int(3)); + + // 9. Check that David can't aggregate-commit-indexed if pointing to a reward cycle in the future + let david_aggregate_commit_indexed_err = tx_block + .receipts + .get(13) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + + // 10. Check that David can't aggregate-commit-indexed when using the incorrect signature topic + let david_aggregate_commit_indexed_err = tx_block + .receipts + .get(14) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + + // 11.
Check that David can aggregate-commit-indexed successfully, checking stacking index = 2 + let david_aggregate_commit_indexed_ok = tx_block + .receipts + .get(15) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); +} + +// In this test scenario two solo stacker-signers (Alice & Bob), +// test out the updated stack-extend & stack-increase functions +// across multiple cycles. +#[test] +fn test_scenario_four() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + peer_config, + ) = pox_4_scenario_test_setup("test_scenario_four", &observer, initial_balances); + + // Initial Alice Signature + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let lock_period = 2; + let alice_signature_initial = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Extend Alice Signature Err (meant for Bob) + let alice_signature_extend_err = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + next_reward_cycle.wrapping_add(1), + &Pox4SignatureTopic::StackExtend, + lock_period, + u128::MAX, + 1, + ); + // Extend Alice Signature Expected + let alice_signature_extend = make_signer_key_signature( + &alice.pox_address, + &alice.private_key, + next_reward_cycle.wrapping_add(1), + &Pox4SignatureTopic::StackExtend, + lock_period, + u128::MAX, + 1, + ); + // Initial Bob Signature + let bob_signature_initial = make_signer_key_signature( + &bob.pox_address, + &bob.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + lock_period, + u128::MAX, + 1, + ); + // Alice initial stack + let alice_stack = make_pox_4_lockup( + &alice.private_key, + alice.nonce, + amount, + &alice.pox_address, + lock_period, + &alice.public_key, + burn_block_height, + Some(alice_signature_initial.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Bob initial stack + let bob_stack = make_pox_4_lockup( + &bob.private_key, + bob.nonce, + amount, + &bob.pox_address, + lock_period, + &bob.public_key, + burn_block_height, + Some(bob_signature_initial.clone()), + u128::MAX, + 1, + ); + bob.nonce += 1; + + let txs = vec![alice_stack.clone(), bob_stack.clone()]; + + // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Verify Alice Stacked + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &alice.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, alice.pox_address); + + // Verify Bob Stacked + let (pox_address, first_reward_cycle, lock_period, 
_indices) = + get_stacker_info_pox_4(&mut peer, &bob.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, bob.pox_address); + + // Now starting create vote txs + // Fetch signer indices in reward cycle 6 + let alice_index = get_signer_index( + &mut peer, + latest_block, + alice.address.clone(), + next_reward_cycle, + ); + let bob_index = get_signer_index( + &mut peer, + latest_block, + bob.address.clone(), + next_reward_cycle, + ); + // Alice err vote + let alice_vote_err = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Alice expected vote + let alice_vote_expected = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + alice.nonce += 1; + // Bob expected vote + let bob_vote_expected = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + bob.nonce += 1; + let txs = vec![ + alice_vote_err.clone(), + alice_vote_expected.clone(), + bob_vote_expected.clone(), + ]; + + // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(7 as u64) + .wrapping_add(15); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) + let alice_err_vote = tx_block + .receipts + .get(1) + .unwrap() + .result + .clone() + .expect_result_err() + .unwrap(); + assert_eq!(alice_err_vote, Value::UInt(10)); + + // Check Alice's expected vote + let alice_expected_vote = tx_block + .receipts + .get(2) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(alice_expected_vote, Value::Bool(true)); + + // Check Bob's expected vote + let bob_expected_vote = tx_block + .receipts + .get(3) + .unwrap() + .result + .clone() + .expect_result_ok() + .unwrap(); + assert_eq!(bob_expected_vote, Value::Bool(true)); + + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + + // Alice stack-extend err tx + let alice_extend_err = make_pox_4_extend( + &alice.private_key, + alice.nonce, + alice.pox_address.clone(), + lock_period, + bob.public_key.clone(), + Some(alice_signature_extend_err.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Alice stack-extend tx + let alice_extend = make_pox_4_extend( + &alice.private_key, + alice.nonce, + alice.pox_address.clone(), + lock_period, + alice.public_key.clone(), + Some(alice_signature_extend.clone()), + u128::MAX, + 1, + ); + alice.nonce += 1; + // Now starting second round of vote txs + // Fetch signer indices in reward cycle 7 + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), 7); + // Alice err vote + let alice_vote_expected_err = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + 7, + ); + alice.nonce += 1; + + let txs = vec![ + alice_extend_err.clone(), + alice_extend.clone(), + alice_vote_expected_err.clone(), + ]; + 
let target_height = target_height.wrapping_add(1);
+    let (latest_block, tx_block) =
+        advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height);
+
+    // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY)
+    let alice_err_extend = tx_block
+        .receipts
+        .get(1)
+        .unwrap()
+        .result
+        .clone()
+        .expect_result_err()
+        .unwrap();
+    assert_eq!(alice_err_extend, Value::Int(35));
+
+    // Check Alice's stack-extend tx
+    let alice_extend_receipt = tx_block
+        .receipts
+        .get(2)
+        .unwrap()
+        .result
+        .clone()
+        .expect_result_ok()
+        .unwrap();
+
+    // Check Alice's expected err vote (err 14 - DUPLICATE_AGGREGATE_PUBLIC_KEY)
+    let alice_expected_vote_err = tx_block
+        .receipts
+        .get(3)
+        .unwrap()
+        .result
+        .clone()
+        .expect_result_err()
+        .unwrap();
+    assert_eq!(alice_expected_vote_err, Value::UInt(14));
+
+    // Get the approved key & assert that it wasn't set (None)
+    let approved_key = get_approved_aggregate_key(&mut peer, latest_block, 7);
+    assert_eq!(approved_key, None);
+}
+
+// In this test case, Alice delegates twice the stacking minimum to Bob.
+// Bob stacks Alice's funds, then immediately attempts a stack-aggregation-increase.
+// This should return a Clarity user error.
+#[test]
+fn delegate_stack_increase_err() {
+    let lock_period: u128 = 2;
+    let observer = TestEventObserver::new();
+    let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) =
+        prepare_pox4_test(function_name!(), Some(&observer));
+
+    let alice_nonce = 0;
+    let alice_key = &keys[0];
+    let alice_address = PrincipalData::from(key_to_stacks_addr(alice_key));
+    let mut bob_nonce = 0;
+    let bob_delegate_key = &keys[1];
+    let bob_delegate_address = PrincipalData::from(key_to_stacks_addr(bob_delegate_key));
+    let min_ustx = get_stacking_minimum(&mut peer, &latest_block);
+    let signer_sk = StacksPrivateKey::from_seed(&[1, 3, 3, 7]);
+    let signer_pk = StacksPublicKey::from_private(&signer_sk);
+    let signer_pk_bytes = signer_pk.to_bytes_compressed();
+    let signer_key_val = Value::buff_from(signer_pk_bytes.clone()).unwrap();
+
+    let pox_addr = PoxAddress::from_legacy(
+        AddressHashMode::SerializeP2PKH,
+        key_to_stacks_addr(bob_delegate_key).bytes,
+    );
+
+    let next_reward_cycle = 1 + burnchain
+        .block_height_to_reward_cycle(block_height)
+        .unwrap();
+
+    let delegate_stx = make_pox_4_delegate_stx(
+        alice_key,
+        alice_nonce,
+        2 * min_ustx,
+        bob_delegate_address.clone(),
+        None,
+        Some(pox_addr.clone()),
+    );
+
+    let alice_principal = PrincipalData::from(key_to_stacks_addr(alice_key));
+
+    let delegate_stack_stx = make_pox_4_delegate_stack_stx(
+        bob_delegate_key,
+        bob_nonce,
+        alice_principal,
+        min_ustx * 2,
+        pox_addr.clone(),
+        block_height as u128,
+        lock_period,
+    );
+
+    let txs = vec![delegate_stx, delegate_stack_stx];
+
+    let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce);
+
+    bob_nonce += 1;
+
+    let signature = make_signer_key_signature(
+        &pox_addr,
+        &signer_sk,
+        next_reward_cycle.into(),
+        &Pox4SignatureTopic::AggregationIncrease,
+        1_u128,
+        u128::MAX,
+        1,
+    );
+
+    // Bob's aggregate increase
+    let bobs_aggregate_increase = make_pox_4_aggregation_increase(
+        &bob_delegate_key,
+        bob_nonce,
+        &pox_addr,
+        next_reward_cycle.into(),
+        0,
+        Some(signature),
+        &signer_pk,
+        u128::MAX,
+        1,
+    );
+
+    let txs = vec![bobs_aggregate_increase];
+
+    let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce);
+
+    let delegate_transactions =
+        get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key));
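+
+    // Why error 28 is expected below: the aggregation-increase targets
+    // reward-cycle index 0, but Bob never made an aggregation-commit for that
+    // cycle, so no reward-set slot exists to increase and pox-4 should reject
+    // the call with ERR_DELEGATION_NO_REWARD_SLOT (err 28).
+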
let actual_result = delegate_transactions.first().cloned().unwrap().result; + + // Should be a DELEGATION NO REWARD SLOT error + let expected_result = Value::error(Value::Int(28)).unwrap(); assert_eq!(actual_result, expected_result); @@ -6827,32 +8479,274 @@ fn missed_slots_no_unlock() { let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); - let txs = [alice_lockup, bob_lockup]; + let txs = [alice_lockup, bob_lockup]; + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice and bob + // for the pox-4 cycles + for cycle_number in first_v4_cycle..first_v4_cycle + 6 { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!( + reward_set_entries.len(), + 2, + "Reward set should contain two entries in cycle {cycle_number}" + ); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + bob_address.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + alice_address.bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle) + 1; + let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // but first, check that bob has locked tokens at (height_target + 1) + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles contain entries for alice and bob still! 
+ for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + bob_address.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + alice_address.bytes.0.to_vec() + ); + } + + let expected_unlock_height = burnchain.reward_cycle_to_block_height(first_v4_cycle + 6) - 1; + // now check that bob has an unlock height of `height_target` + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), expected_unlock_height); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + let alice_bal = get_stx_account_at( + &mut peer, + &latest_block, + &alice_address.to_account_principal(), + ); + assert_eq!(alice_bal.unlock_height(), expected_unlock_height); + assert_eq!(alice_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX * 1024); + + // check that the total reward cycle amounts have not decremented + for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1025 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + let bob_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + PoxVersions::Pox4.get_name_str(), + ) + .expect("Bob should have stacking-state entry") + .expect_tuple() + .unwrap(); + let reward_indexes_str = bob_state.get("reward-set-indexes").unwrap().to_string(); + assert_eq!(reward_indexes_str, "(u1 u1 u1 u1 u1 u1)"); + + let alice_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &alice_address.to_account_principal(), + PoxVersions::Pox4.get_name_str(), + ) + .expect("Alice should have stacking-state entry") + .expect_tuple() + .unwrap(); + let reward_indexes_str = alice_state.get("reward-set-indexes").unwrap().to_string(); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // check that bob is still locked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &bob_address.to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), expected_unlock_height); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + // now let's check some tx receipts + + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut coinbase_txs = vec![]; + let mut reward_cycles_in_2_5 = 0u64; + + for b in blocks.into_iter() { + if let Some(ref reward_set_data) = b.reward_set_data { + let signers_set = reward_set_data.reward_set.signers.as_ref().unwrap(); + assert_eq!(signers_set.len(), 1); + assert_eq!( + StacksPublicKey::from_private(&alice).to_bytes_compressed(), + signers_set[0].signing_key.to_vec() + ); + let rewarded_addrs = HashSet::<_>::from_iter( + reward_set_data + .reward_set + .rewarded_addresses + .iter() + .map(|a| a.to_burnchain_repr()), + ); + assert_eq!(rewarded_addrs.len(), 1); + assert_eq!( + reward_set_data.reward_set.rewarded_addresses[0].bytes(), + alice_address.bytes.0.to_vec(), + ); + reward_cycles_in_2_5 += 1; + eprintln!("{:?}", b.reward_set_data) + } + + for (i, r) in 
b.receipts.into_iter().enumerate() {
+            if i == 0 {
+                coinbase_txs.push(r);
+                continue;
+            }
+            match r.transaction {
+                TransactionOrigin::Stacks(ref t) => {
+                    let addr = t.auth.origin().address_testnet();
+                    if addr == alice_address {
+                        alice_txs.insert(t.auth.get_origin_nonce(), r);
+                    } else if addr == bob_address {
+                        bob_txs.insert(t.auth.get_origin_nonce(), r);
+                    }
+                }
+                _ => {}
+            }
+        }
+    }
+
+    assert_eq!(alice_txs.len(), 1);
+    assert_eq!(bob_txs.len(), 1);
+    // only mined one 2.5 reward cycle, but make sure it was picked up in the events loop above
+    assert_eq!(reward_cycles_in_2_5, 1);
+
+    // all should have committed okay
+    assert!(
+        match bob_txs.get(&0).unwrap().result {
+            Value::Response(ref r) => r.committed,
+            _ => false,
+        },
+        "Bob tx0 should have committed okay"
+    );
+
+    // Check the coinbase receipts for "handle-unlock" print events: this test
+    // triggers no auto-unlock, so the coinbase tx must carry no such events.
+    for unlock_coinbase_index in [auto_unlock_coinbase] {
+        // expect the unlock to occur 1 block after the handle-unlock method was invoked.
+        let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1;
+        let expected_cycle = pox_constants
+            .block_height_to_reward_cycle(0, expected_unlock_height)
+            .unwrap();
+        assert!(
+            coinbase_txs[unlock_coinbase_index as usize].events.is_empty(),
+            "handle-unlock events are coinbase events and there should be no handle-unlock invocation in this test"
+        );
+    }
+}
+
+/// In this test case, we lock up enough to get participation to be non-zero, but not enough to qualify for a reward slot.
+#[test]
+fn no_lockups_2_5() {
+    let EXPECTED_FIRST_V2_CYCLE = 8;
+    // the sim environment produces 25 empty sortitions before
+    // tenures start being tracked.
+    let EMPTY_SORTITIONS = 25;
+
+    let (epochs, mut pox_constants) = make_test_epochs_pox();
+    pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1;
+
+    let mut burnchain = Burnchain::default_unittest(
+        0,
+        &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(),
+    );
+    burnchain.pox_constants = pox_constants.clone();
+
+    let observer = TestEventObserver::new();
+
+    let (mut peer, mut keys) = instantiate_pox_peer_with_epoch(
+        &burnchain,
+        &function_name!(),
+        Some(epochs.clone()),
+        Some(&observer),
+    );
+
+    peer.config.check_pox_invariants = None;
+
+    let alice = keys.pop().unwrap();
+    let bob = keys.pop().unwrap();
+    let alice_address = key_to_stacks_addr(&alice);
+    let bob_address = key_to_stacks_addr(&bob);
+
+    let mut coinbase_nonce = 0;
+
+    let first_v4_cycle = burnchain
+        .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64)
+        .unwrap()
+        + 1;
+
+    // produce blocks until epoch 2.5
+    while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height {
+        peer.tenure_with_txs(&[], &mut coinbase_nonce);
+    }
+
+    let tip = get_tip(peer.sortdb.as_ref());
+
+    let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6);
+
+    let txs = [bob_lockup];
     let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce);
 
-    // check that the "raw" reward set will contain entries for alice and bob
-    // for the pox-4 cycles
+    // check that the "raw" reward set will contain an entry for bob
     for cycle_number in first_v4_cycle..first_v4_cycle + 6 {
         let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number);
         let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start);
         assert_eq!(
             reward_set_entries.len(),
-            2,
-            "Reward
set should contain two entries in cycle {cycle_number}" + 1, + "Reward set should contain one entry in cycle {cycle_number}" ); assert_eq!( reward_set_entries[0].reward_address.bytes(), bob_address.bytes.0.to_vec() ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); } // we'll produce blocks until the next reward cycle gets through the "handled start" code // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle) + 1; + let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle + 1) + 1; let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; // but first, check that bob has locked tokens at (height_target + 1) @@ -6867,254 +8761,748 @@ fn missed_slots_no_unlock() { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - // check that the "raw" reward sets for all cycles contain entries for alice and bob still! - for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!(reward_set_entries.len(), 2); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); + let blocks = observer.get_blocks(); + for b in blocks.into_iter() { + if let Some(ref reward_set_data) = b.reward_set_data { + assert_eq!(reward_set_data.reward_set.signers, Some(vec![])); + assert!(reward_set_data.reward_set.rewarded_addresses.is_empty()); + eprintln!("{:?}", b.reward_set_data) + } } +} - let expected_unlock_height = burnchain.reward_cycle_to_block_height(first_v4_cycle + 6) - 1; - // now check that bob has an unlock height of `height_target` - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), +// In this scenario, two service signers (Alice, Bob), one stacker-signer (Carl), two stacking pool operators (Dave, Eve), & six pool stackers (Frank, Grace, Heidi, Ivan, Judy, Mallory). + +// First Nakamoto Reward Cycle +// First Nakamoto Tenure + +// 1. Franks stacks for 1 reward cycle, Grace stacks for 2 reward cycles & so on…Mallory stacks for 6 reward cycles: (so 6 wallets stacking n, n+1, n+2… cycles) +// 2. Dave asks Alice for 3 signatures +// 3. Eve asks Bob for 3 set-authorizations +// 4. Ivan - Mallory ask Bob to set-approval-authorization +// 5. Carl stx-stacks & self-signs for 3 reward cycle +// 6. In Carl's second reward cycle, he calls stx-extend for 3 more reward cycles +// 7. 
In Carl's third reward cycle, he calls stx-increase and should fail as he is straddling 2 keys +#[test] +fn test_scenario_five() { + // Alice service signer setup + let mut alice = StackerSignerInfo::new(); + // Bob service signer setup + let mut bob = StackerSignerInfo::new(); + // Carl solo stacker and signer setup + let mut carl = StackerSignerInfo::new(); + // David stacking pool operator (delegating signing to Alice) Setup + let mut david = StackerSignerInfo::new(); + // Eve stacking pool operator (delegating signing to Bob) Setup + let mut eve = StackerSignerInfo::new(); + // Frank pool stacker delegating STX to David + let mut frank = StackerSignerInfo::new(); + // Grace pool stacker delegating STX to David + let mut grace = StackerSignerInfo::new(); + // Heidi pool stacker delegating STX to David + let mut heidi = StackerSignerInfo::new(); + // Ivan pool stacker delegating STX to Eve + let mut ivan = StackerSignerInfo::new(); + // Jude pool stacker delegating STX to Eve + let mut jude = StackerSignerInfo::new(); + // Mallory pool stacker delegating STX to Eve + let mut mallory = StackerSignerInfo::new(); + + let default_initial_balances = 1_000_000_000_000_000_000; + let initial_balances = vec![ + (alice.principal.clone(), default_initial_balances), + (bob.principal.clone(), default_initial_balances), + (carl.principal.clone(), default_initial_balances), + (david.principal.clone(), default_initial_balances), + (eve.principal.clone(), default_initial_balances), + (frank.principal.clone(), default_initial_balances), + (grace.principal.clone(), default_initial_balances), + (heidi.principal.clone(), default_initial_balances), + (ivan.principal.clone(), default_initial_balances), + (jude.principal.clone(), default_initial_balances), + (mallory.principal.clone(), default_initial_balances), + ]; + let observer = TestEventObserver::new(); + let ( + mut peer, + mut peer_nonce, + burn_block_height, + target_height, + reward_cycle, + next_reward_cycle, + min_ustx, + mut peer_config, + ) = pox_4_scenario_test_setup("test_scenario_five", &observer, initial_balances); + + // Lock periods for each stacker + let carl_lock_period = 3; + let frank_lock_period = 1; + let grace_lock_period = 2; + let heidi_lock_period = 3; + let ivan_lock_period = 4; + let jude_lock_period = 5; + let mallory_lock_period = 6; + + let carl_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(carl_lock_period) as u64) + as u128; + let frank_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(frank_lock_period) as u64) + as u128; + let grace_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(grace_lock_period) as u64) + as u128; + let heidi_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(heidi_lock_period) as u64) + as u128; + let ivan_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(ivan_lock_period) as u64) + as u128; + let jude_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(jude_lock_period) as u64) + as u128; + let mallory_end_burn_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(mallory_lock_period) as u64) + as u128; + + // The pool operators should delegate their signing power for as long as their longest stacker + 
let david_lock_period = heidi_lock_period; + let eve_lock_period = mallory_lock_period; + + let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; + let carl_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackStx, + carl_lock_period, + u128::MAX, + 1, ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + let carl_stack_tx = make_pox_4_lockup( + &carl.private_key, + carl.nonce, + amount, + &carl.pox_address, + carl_lock_period, + &carl.public_key, + burn_block_height, + Some(carl_signature_for_carl), + u128::MAX, + 1, + ); + carl.nonce += 1; - let alice_bal = get_stx_account_at( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), + // Frank pool stacker delegating STX to David + let frank_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &frank.private_key, + frank.nonce, + amount, + david.principal.clone(), + Some(frank_end_burn_height), + Some(david.pox_address.clone()), ); - assert_eq!(alice_bal.unlock_height(), expected_unlock_height); - assert_eq!(alice_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX * 1024); + frank.nonce += 1; - // check that the total reward cycle amounts have not decremented - for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - assert_eq!( - get_reward_cycle_total(&mut peer, &latest_block, cycle_number), - 1025 * POX_THRESHOLD_STEPS_USTX - ); + // Grace pool stacker delegating STX to David + let grace_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &grace.private_key, + grace.nonce, + amount, + david.principal.clone(), + Some(grace_end_burn_height), + Some(david.pox_address.clone()), + ); + grace.nonce += 1; + + // Heidi pool stacker delegating STX to David + let heidi_delegate_stx_to_david_tx = make_pox_4_delegate_stx( + &heidi.private_key, + heidi.nonce, + amount, + david.principal.clone(), + Some(heidi_end_burn_height), + Some(david.pox_address.clone()), + ); + heidi.nonce += 1; + + // Ivan pool stacker delegating STX to Eve + let ivan_delegate_stx_to_eve_tx = make_pox_4_delegate_stx( + &ivan.private_key, + ivan.nonce, + amount, + eve.principal.clone(), + Some(ivan_end_burn_height), + Some(eve.pox_address.clone()), + ); + ivan.nonce += 1; + + // Jude pool stacker delegating STX to Eve + let jude_delegate_stx_to_eve_tx = make_pox_4_delegate_stx( + &jude.private_key, + jude.nonce, + amount, + eve.principal.clone(), + Some(jude_end_burn_height), + Some(eve.pox_address.clone()), + ); + jude.nonce += 1; + + // Mallory pool stacker delegating STX to Eve + let mallory_delegate_stx_to_eve_tx = make_pox_4_delegate_stx( + &mallory.private_key, + mallory.nonce, + amount, + eve.principal.clone(), + Some(mallory_end_burn_height), + Some(eve.pox_address.clone()), + ); + mallory.nonce += 1; + + let davids_stackers = &[ + (frank.clone(), frank_lock_period), + (grace.clone(), grace_lock_period), + (heidi.clone(), heidi_lock_period), + ]; + let eves_stackers = &[ + (ivan.clone(), ivan_lock_period), + (jude.clone(), jude_lock_period), + (mallory.clone(), mallory_lock_period), + ]; + + // David calls 'delegate-stack-stx' for each of his stackers + let davids_delegate_stack_stx_txs: Vec<_> = davids_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &david.private_key, + david.nonce, + stacker.principal.clone(), + amount, + david.pox_address.clone(), + burn_block_height as u128, + *lock_period, + ); + david.nonce += 1; + tx + 
}) + .collect(); + + // Eve calls 'delegate-stack-stx' for each of her stackers + let eves_delegate_stack_stx_txs: Vec<_> = eves_stackers + .iter() + .map(|(stacker, lock_period)| { + let tx = make_pox_4_delegate_stack_stx( + &eve.private_key, + eve.nonce, + stacker.principal.clone(), + amount, + eve.pox_address.clone(), + burn_block_height as u128, + *lock_period, // Must be called every reward cycle, therefore only ever lasts for 1 lock period + ); + eve.nonce += 1; + tx + }) + .collect(); + + // Alice's authorization for David to aggregate commit + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + + // David aggregate commits + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 1, + ); + david.nonce += 1; + + // Bob's authorization for Eve to aggregate commit + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 1, + ); + + // Eve aggregate commits + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + &eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 1, + ); + eve.nonce += 1; + + let mut txs = vec![ + frank_delegate_stx_to_david_tx, + grace_delegate_stx_to_david_tx, + heidi_delegate_stx_to_david_tx, + ivan_delegate_stx_to_eve_tx, + jude_delegate_stx_to_eve_tx, + mallory_delegate_stx_to_eve_tx, + carl_stack_tx, + ]; + txs.extend(davids_delegate_stack_stx_txs); + txs.extend(eves_delegate_stack_stx_txs); + txs.extend(vec![ + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]); + + // Advance to reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check that all of David's stackers have been added to the reward set + for (stacker, stacker_lock_period) in davids_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, david.pox_address); + assert_eq!(lock_period, *stacker_lock_period); } - // check that bob's stacking-state is gone and alice's stacking-state is correct - let bob_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Bob should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = bob_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u1 u1 u1 u1 u1 u1)"); + // Check that all of Eve's stackers have been added to the reward set + for (stacker, stacker_lock_period) in eves_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to 
find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, eve.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + // Check that Carl's stacker has been added to the reward set + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, next_reward_cycle); + assert_eq!(pox_address, carl.pox_address); + assert_eq!(lock_period, carl_lock_period); + + // Verify stacker transactions + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } + } - let alice_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Alice should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = alice_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + for tx in &txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find stacking transaction ({txid}) in observed transactions") + } + } - // check that bob is still locked at next block - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let cycle_id = next_reward_cycle; + // Create vote txs for each signer + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), cycle_id); + let bob_index = get_signer_index(&mut peer, latest_block, bob.address.clone(), cycle_id); + let carl_index = get_signer_index(&mut peer, latest_block, carl.address.clone(), cycle_id); + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let carl_vote = make_signers_vote_for_aggregate_public_key( + &carl.private_key, + carl.nonce, + carl_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let vote_txs = vec![alice_vote, bob_vote, carl_vote]; + alice.nonce += 1; + bob.nonce += 1; + carl.nonce += 1; - let bob_bal = get_stx_account_at( + // Mine vote txs & advance to the reward set calculation of the next reward cycle + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64); + let (latest_block, tx_block) = advance_to_block_height( &mut peer, - &latest_block, - &bob_address.to_account_principal(), + &observer, + &vote_txs, + &mut peer_nonce, + target_height, + ); + + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } + } + + for tx in &vote_txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find vote transaction ({txid}) in observed transactions") + } + } + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + + // Stack for following reward cycle again and then advance to epoch 3.0 activation boundary + let reward_cycle 
= peer.get_reward_cycle() as u128; + let next_reward_cycle = reward_cycle.wrapping_add(1); + let carl_lock_period = carl_lock_period.wrapping_add(3); // Carl's total lock period is now 5 + let carl_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &carl.private_key, + reward_cycle, + &Pox4SignatureTopic::StackExtend, + 3, + u128::MAX, + 2, + ); + // Carl extends his lock period by 3 cycles + let carl_extend_tx = make_pox_4_extend( + &carl.private_key, + carl.nonce, + carl.pox_address.clone(), + 3, + carl.public_key, + Some(carl_signature_for_carl), + u128::MAX, + 2, ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - // now let's check some tx receipts + carl.nonce += 1; + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 2, + ); + // David commits his aggregate for the next reward cycle + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 2, + ); + david.nonce += 1; - let blocks = observer.get_blocks(); + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 2, + ); + // Eve commits her aggregate for the next reward cycle + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + &eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 2, + ); + eve.nonce += 1; - let mut alice_txs = HashMap::new(); - let mut bob_txs = HashMap::new(); - let mut coinbase_txs = vec![]; - let mut reward_cycles_in_2_5 = 0u64; + let txs = vec![ + carl_extend_tx, + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]; - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - let signers_set = reward_set_data.reward_set.signers.as_ref().unwrap(); - assert_eq!(signers_set.len(), 1); - assert_eq!( - StacksPublicKey::from_private(&alice).to_bytes_compressed(), - signers_set[0].signing_key.to_vec() - ); - let rewarded_addrs = HashSet::<_>::from_iter( - reward_set_data - .reward_set - .rewarded_addresses - .iter() - .map(|a| a.to_burnchain_repr()), - ); - assert_eq!(rewarded_addrs.len(), 1); - assert_eq!( - reward_set_data.reward_set.rewarded_addresses[0].bytes(), - alice_address.bytes.0.to_vec(), - ); - reward_cycles_in_2_5 += 1; - eprintln!("{:?}", b.reward_set_data) + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + + // Check that all of David's stackers are stacked + for (stacker, stacker_lock_period) in davids_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, david.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + // Check that all of 
Eve's stackers are stacked + for (stacker, stacker_lock_period) in eves_stackers { + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, eve.pox_address); + assert_eq!(lock_period, *stacker_lock_period); + } + let (pox_address, first_reward_cycle, lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, carl.pox_address); + assert_eq!(lock_period, carl_lock_period); + + // Verify stacker transactions + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); } + } - for (i, r) in b.receipts.into_iter().enumerate() { - if i == 0 { - coinbase_txs.push(r); - continue; - } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } - } - _ => {} - } + for tx in &txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find stacking transaction ({txid}) in observed transactions") } } - assert_eq!(alice_txs.len(), 1); - assert_eq!(bob_txs.len(), 1); - // only mined one 2.5 reward cycle, but make sure it was picked up in the events loop above - assert_eq!(reward_cycles_in_2_5, 1); + let cycle_id = next_reward_cycle; + // Generate next cycle aggregate public key + peer_config.aggregate_public_key = Some( + peer_config + .test_signers + .unwrap() + .generate_aggregate_key(cycle_id as u64), + ); + // create vote txs + let alice_index = get_signer_index(&mut peer, latest_block, alice.address.clone(), cycle_id); + let bob_index = get_signer_index(&mut peer, latest_block, bob.address.clone(), cycle_id); + let carl_index = get_signer_index(&mut peer, latest_block, carl.address.clone(), cycle_id); + let alice_vote = make_signers_vote_for_aggregate_public_key( + &alice.private_key, + alice.nonce, + alice_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let bob_vote = make_signers_vote_for_aggregate_public_key( + &bob.private_key, + bob.nonce, + bob_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let carl_vote = make_signers_vote_for_aggregate_public_key( + &carl.private_key, + carl.nonce, + carl_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + let vote_txs = vec![alice_vote, bob_vote, carl_vote]; + alice.nonce += 1; + bob.nonce += 1; + carl.nonce += 1; - // all should have committedd okay - assert!( - match bob_txs.get(&0).unwrap().result { - Value::Response(ref r) => r.committed, - _ => false, - }, - "Bob tx0 should have committed okay" + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64); + // Submit vote transactions + let (latest_block, tx_block) = advance_to_block_height( + &mut peer, + &observer, + &vote_txs, + &mut peer_nonce, + target_height, ); - // Check that the event produced by "handle-unlock" has a well-formed print event - // and that this event is included as part of the coinbase tx - for unlock_coinbase_index in [auto_unlock_coinbase] { - // expect the unlock 
to occur 1 block after the handle-unlock method was invoked. - let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; - let expected_cycle = pox_constants - .block_height_to_reward_cycle(0, expected_unlock_height) - .unwrap(); - assert!( - coinbase_txs[unlock_coinbase_index as usize].events.is_empty(), - "handle-unlock events are coinbase events and there should be no handle-unlock invocation in this test" - ); + let mut observed_txs = HashSet::new(); + for tx_receipt in tx_block.receipts { + if let TransactionOrigin::Stacks(ref tx) = tx_receipt.transaction { + observed_txs.insert(tx.txid()); + } } -} -/// In this test case, we lockup enough to get participation to be non-zero, but not enough to qualify for a reward slot. -#[test] -fn no_lockups_2_5() { - let EXPECTED_FIRST_V2_CYCLE = 8; - // the sim environment produces 25 empty sortitions before - // tenures start being tracked. - let EMPTY_SORTITIONS = 25; + for tx in &vote_txs { + let txid = tx.txid(); + if !observed_txs.contains(&txid) { + panic!("Failed to find vote transaction ({txid}) in observed transactions") + } + } + let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) + .expect("No approved key found"); + assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); - let (epochs, mut pox_constants) = make_test_epochs_pox(); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; + // Let us start stacking for the following reward cycle + let current_reward_cycle = peer.get_reward_cycle() as u128; + let next_reward_cycle = current_reward_cycle.wrapping_add(1); - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + let alice_authorization_for_david = make_signer_key_signature( + &david.pox_address, + &alice.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 3, ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = TestEventObserver::new(); - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - &function_name!(), - Some(epochs.clone()), - Some(&observer), + let davids_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &david.private_key, + david.nonce, + &david.pox_address, + next_reward_cycle, + Some(alice_authorization_for_david), + &alice.public_key, + u128::MAX, + 3, ); + david.nonce += 1; - peer.config.check_pox_invariants = None; - - let alice = keys.pop().unwrap(); - let bob = keys.pop().unwrap(); - let alice_address = key_to_stacks_addr(&alice); - let bob_address = key_to_stacks_addr(&bob); + let bob_authorization_for_eve = make_signer_key_signature( + &eve.pox_address, + &bob.private_key, + next_reward_cycle, + &Pox4SignatureTopic::AggregationCommit, + 1, + u128::MAX, + 3, + ); - let mut coinbase_nonce = 0; + let eves_aggregate_commit_index_tx = make_pox_4_aggregation_commit_indexed( + &eve.private_key, + eve.nonce, + &eve.pox_address, + next_reward_cycle, + Some(bob_authorization_for_eve), + &bob.public_key, + u128::MAX, + 3, + ); + eve.nonce += 1; - let first_v4_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) - .unwrap() - + 1; + // Carl attempts a stx-increase using Alice's key instead of his own + // Should fail as he already has delegated his signing power to himself + let alice_signature_for_carl = make_signer_key_signature( + &carl.pox_address, + &alice.private_key, + 
current_reward_cycle, + &Pox4SignatureTopic::StackIncrease, + carl_lock_period, + u128::MAX, + 4, + ); - // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { - peer.tenure_with_txs(&[], &mut coinbase_nonce); - } + let carl_increase_tx = make_pox_4_stack_increase( + &carl.private_key, + carl.nonce, + amount, + &alice.public_key, + Some(alice_signature_for_carl), + u128::MAX, + 4, + ); + carl.nonce += 1; - let tip = get_tip(peer.sortdb.as_ref()); + let txs = vec![ + carl_increase_tx, + davids_aggregate_commit_index_tx, + eves_aggregate_commit_index_tx, + ]; - let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); + let target_height = peer + .config + .burnchain + .reward_cycle_to_block_height(next_reward_cycle as u64) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .wrapping_add(2); + // This assertion just makes testing logic a bit easier + let davids_stackers = &[ + (grace.clone(), grace_lock_period), + (heidi.clone(), heidi_lock_period), + ]; - let txs = [bob_lockup]; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let (latest_block, tx_block) = + advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); - // check that the "raw" reward set will contain an entry for bob - for cycle_number in first_v4_cycle..first_v4_cycle + 6 { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!( - reward_set_entries.len(), - 1, - "Reward set should contain one entry in cycle {cycle_number}" - ); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); + for (stacker, _) in davids_stackers { + let (pox_address, first_reward_cycle, _lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, david.pox_address); } - - // we'll produce blocks until the next reward cycle gets through the "handled start" code - // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle + 1) + 1; - let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; - - // but first, check that bob has locked tokens at (height_target + 1) - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - while get_tip(peer.sortdb.as_ref()).block_height < height_target { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // Frank should no longer be considered a stacker as his lock period has expired + assert!(get_stacker_info_pox_4(&mut peer, &frank.principal).is_none()); + + for (stacker, _) in eves_stackers { + let (pox_address, first_reward_cycle, _lock_period, _indices) = + get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + assert_eq!(first_reward_cycle, reward_cycle); + assert_eq!(pox_address, eve.pox_address); } - let blocks = observer.get_blocks(); - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - assert_eq!(reward_set_data.reward_set.signers, Some(vec![])); - assert!(reward_set_data.reward_set.rewarded_addresses.is_empty()); - eprintln!("{:?}", b.reward_set_data) - } - } + let 
(pox_address, first_reward_cycle, _lock_period, _indices) =
+        get_stacker_info_pox_4(&mut peer, &carl.principal).expect("Failed to find stacker");
+    assert_eq!(first_reward_cycle, reward_cycle);
+    assert_eq!(pox_address, carl.pox_address);
+
+    // Assert that Carl's error is err(40)
+    let carl_increase_err = tx_block.receipts[1].clone().result;
+    assert_eq!(carl_increase_err, Value::error(Value::Int(40)).unwrap());
+}
diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs
index a97a0c1e09c..37b2e016b75 100644
--- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs
+++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs
@@ -349,7 +349,11 @@ pub fn prepare_signers_test<'a>(
     stackers: &[TestStacker],
     observer: Option<&'a TestEventObserver>,
 ) -> (TestPeer<'a>, TestSigners, StacksBlockId, u128) {
-    let mut test_signers = TestSigners::default();
+    let signer_keys = stackers
+        .iter()
+        .map(|s| s.signer_private_key.clone())
+        .collect::<Vec<_>>();
+    let mut test_signers = TestSigners::new(signer_keys);
 
     let mut peer = boot_nakamoto(
         test_name,
@@ -483,26 +487,30 @@ pub fn readonly_call_with_sortdb(
     args: Vec<Value>,
 ) -> Value {
     chainstate
-        .with_read_only_clarity_tx(&sortdb.index_conn(), tip, |connection| {
-            connection
-                .with_readonly_clarity_env(
-                    false,
-                    0x80000000,
-                    ClarityVersion::Clarity2,
-                    PrincipalData::from(boot_code_addr(false)),
-                    None,
-                    LimitedCostTracker::new_free(),
-                    |env| {
-                        env.execute_contract_allow_private(
-                            &boot_code_id(&boot_contract, false),
-                            &function_name,
-                            &symbols_from_values(args),
-                            true,
-                        )
-                    },
-                )
-                .unwrap()
-        })
+        .with_read_only_clarity_tx(
+            &sortdb.index_handle_at_block(chainstate, tip).unwrap(),
+            tip,
+            |connection| {
+                connection
+                    .with_readonly_clarity_env(
+                        false,
+                        0x80000000,
+                        ClarityVersion::Clarity2,
+                        PrincipalData::from(boot_code_addr(false)),
+                        None,
+                        LimitedCostTracker::new_free(),
+                        |env| {
+                            env.execute_contract_allow_private(
+                                &boot_code_id(&boot_contract, false),
+                                &function_name,
+                                &symbols_from_values(args),
+                                true,
+                            )
+                        },
+                    )
+                    .unwrap()
+            },
+        )
         .unwrap()
 }
diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs
deleted file mode 100644
index aef41ef4a51..00000000000
--- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs
+++ /dev/null
@@ -1,2227 +0,0 @@
-// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -use std::collections::{HashMap, HashSet, VecDeque}; - -use clarity::boot_util::boot_code_addr; -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::contexts::OwnedEnvironment; -use clarity::vm::contracts::Contract; -use clarity::vm::costs::{CostOverflowingMath, LimitedCostTracker}; -use clarity::vm::database::*; -use clarity::vm::errors::{ - CheckErrors, Error, IncomparableError, InterpreterError, InterpreterResult, RuntimeErrorType, -}; -use clarity::vm::eval; -use clarity::vm::events::StacksTransactionEvent; -use clarity::vm::representations::SymbolicExpression; -use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; -use clarity::vm::types::{ - BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, - StacksAddressExtensions, StandardPrincipalData, TupleData, TupleTypeSignature, TypeSignature, - Value, NONE, -}; -use stacks_common::address::AddressHashMode; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, -}; -use stacks_common::types::{Address, PrivateKey}; -use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::Secp256k1PrivateKey; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; - -use super::test::*; -use super::RawRewardSetEntry; -use crate::burnchains::{Burnchain, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{self, SortitionDB}; -use crate::chainstate::burn::operations::*; -use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; -use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::tests::get_account; -use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::NakamotoBlock; -use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; -use crate::chainstate::stacks::boot::pox_2_tests::{ - check_pox_print_event, generate_pox_clarity_value, get_reward_set_entries_at, - get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, PoxPrintFields, - StackingStateCheckData, -}; -use crate::chainstate::stacks::boot::pox_4_tests::{ - assert_latest_was_burn, get_last_block_sender_transactions, get_tip, make_test_epochs_pox, -}; -use crate::chainstate::stacks::boot::signers_tests::{ - get_signer_index, prepare_signers_test, readonly_call, -}; -use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, SIGNERS_NAME, - SIGNERS_VOTING_NAME, -}; -use crate::chainstate::stacks::db::{ - MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, -}; -use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; -use crate::chainstate::stacks::index::marf::MarfConnection; -use crate::chainstate::stacks::index::MarfTrieId; -use crate::chainstate::stacks::tests::make_coinbase; -use crate::chainstate::stacks::*; -use crate::chainstate::{self}; -use crate::clarity_vm::clarity::{ClarityBlockConnection, Error as ClarityError}; -use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; -use crate::clarity_vm::database::HeadersDBConn; -use crate::core::*; -use crate::net::test::{TestEventObserver, TestPeer}; -use crate::util_lib::boot::boot_code_id; -use crate::util_lib::db::{DBConn, FromRow}; - -pub fn prepare_pox4_test<'a>( - test_name: &str, - observer: 
Option<&'a TestEventObserver>, -) -> ( - Burnchain, - TestPeer<'a>, - Vec<StacksPrivateKey>, - StacksBlockId, - u64, - usize, -) { - let (epochs, pox_constants) = make_test_epochs_pox(); - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let (mut peer, keys) = - instantiate_pox_peer_with_epoch(&burnchain, test_name, Some(epochs.clone()), observer); - - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; - - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - assert_latest_was_burn(&mut peer); - } - } - - let block_height = get_tip(peer.sortdb.as_ref()).block_height; - - info!("Block height: {}", block_height); - - ( - burnchain, - peer, - keys, - latest_block, - block_height, - coinbase_nonce, - ) -} - -/// In this test case, Alice & Bob both successfully vote for the same key. -/// Alice votes successfully, then Bob votes successfully, reaching the -/// threshold and setting the aggregate public key. -#[test] -fn vote_for_aggregate_public_key_success() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let
block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's vote should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - // Bob's vote should succeed and reach the threshold, setting the aggregate public key - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice & Bob both successfully vote for the same key, -/// but also trigger all tenure-agnostic errors.
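// For reference (sketch, not part of the diff): the signers-voting error codes
// the next test asserts, collected here exactly as they are named in the
// inline comments of this file.
const ERR_CYCLE_NOT_SET: u128 = 2;
const ERR_SIGNER_INDEX_MISMATCH: u128 = 10;
const ERR_INVALID_SIGNER_INDEX: u128 = 11;
const ERR_OUT_OF_VOTING_WINDOW: u128 = 12;
const ERR_ILL_FORMED_AGGREGATE_PUBLIC_KEY: u128 = 13;
const ERR_DUPLICATE_AGGREGATE_PUBLIC_KEY: u128 = 14;
const ERR_DUPLICATE_VOTE: u128 = 15;
const ERR_INVALID_ROUND: u128 = 17;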
-#[test] -fn vote_for_aggregate_public_key_with_errors() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts a vote with Bob's index - should return signer index mismatch error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts a vote with a non-existent index - should return invalid signer index error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - 2, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts a vote with an invalid public key - should return ill-formed public key error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_ill_formed, - 0, - cycle_id + 1, - ), - // Alice casts a vote with an incorrect reward cycle - should return cycle not set error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 3, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 4, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote twice - should return duplicate vote error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 5, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote with the wrong round - should return an invalid round error - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 2, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last eight txs in the last block - let block =
observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 10); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's first vote should fail (signer mismatch) - let alice_first_vote_tx = &receipts[2]; - let alice_first_vote_tx_result = alice_first_vote_tx.result.clone(); - assert_eq!( - alice_first_vote_tx_result, - Value::err_uint(10) // ERR_SIGNER_INDEX_MISMATCH - ); - assert_eq!(alice_first_vote_tx.events.len(), 0); - - // Alice's second vote should fail (invalid signer) - let alice_second_vote_tx = &receipts[3]; - let alice_second_vote_tx_result = alice_second_vote_tx.result.clone(); - assert_eq!( - alice_second_vote_tx_result, - Value::err_uint(11) // ERR_INVALID_SIGNER_INDEX - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); - - // Alice's third vote should fail (ill formed aggregate public key) - let alice_third_vote_tx = &receipts[4]; - let alice_third_vote_tx_result = alice_third_vote_tx.result.clone(); - assert_eq!( - alice_third_vote_tx_result, - Value::err_uint(13) // ERR_ILL_FORMED_AGGREGATE_PUBLIC_KEY - ); - assert_eq!(alice_third_vote_tx.events.len(), 0); - - // Alice's fourth vote should fail (cycle not set) - let alice_fourth_vote_tx = &receipts[5]; - let alice_fourth_vote_tx_result = alice_fourth_vote_tx.result.clone(); - assert_eq!( - alice_fourth_vote_tx_result, - Value::err_uint(2) // ERR_CYCLE_NOT_SET - ); - assert_eq!(alice_fourth_vote_tx.events.len(), 0); - - // Alice's fifth vote, correct vote should succeed - let alice_fifth_vote_tx = &receipts[6]; - assert_eq!(alice_fifth_vote_tx.result, Value::okay_true()); - assert_eq!(alice_fifth_vote_tx.events.len(), 1); - let alice_vote_event = &alice_fifth_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - // Alice's sixth vote should fail (duplicate vote) - let alice_sixth_vote_tx = &receipts[7]; - let alice_sixth_vote_tx_result = alice_sixth_vote_tx.result.clone(); - assert_eq!( - alice_sixth_vote_tx_result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_sixth_vote_tx.events.len(), 0); - - // Bob's first vote should fail (invalid round) - let bob_first_vote_tx = &receipts[8]; - let bob_first_vote_tx_result = bob_first_vote_tx.result.clone(); - assert_eq!( - bob_first_vote_tx_result, - Value::err_uint(17) // ERR_INVALID_ROUND - ); - assert_eq!(bob_first_vote_tx.events.len(), 0); - - // Bob's second vote should succeed and reach the threshold, setting the aggregate public key - let bob_second_vote_tx = &receipts[9]; - assert_eq!(bob_second_vote_tx.result, Value::okay_true()); - assert_eq!(bob_second_vote_tx.events.len(), 2); - let bob_vote_event = &bob_second_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - 
Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - let approve_event = &bob_second_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// 4 stackers vote for the same aggregate public key. The threshold is reached -/// after the 3rd vote, so the 4th gets an "out of voting window" error. -#[test] -fn vote_for_aggregate_public_key_out_of_window() { - // Test setup - let stacker1 = TestStacker::from_seed(&[3, 4]); - let stacker2 = TestStacker::from_seed(&[5, 6]); - let stacker3 = TestStacker::from_seed(&[7, 8]); - let stacker4 = TestStacker::from_seed(&[9, 10]); - let observer = TestEventObserver::new(); - - // Signer 1 - let stacker1_key = &stacker1.signer_private_key; - let stacker1_address = key_to_stacks_addr(stacker1_key); - let stacker1_principal = PrincipalData::from(stacker1_address); - - // Signer 2 - let stacker2_key = &stacker2.signer_private_key; - let stacker2_address = key_to_stacks_addr(stacker2_key); - let stacker2_principal = PrincipalData::from(stacker2_address); - - // Signer 3 - let stacker3_key = &stacker3.signer_private_key; - let stacker3_address = key_to_stacks_addr(stacker3_key); - let stacker3_principal = PrincipalData::from(stacker3_address); - - // Signer 4 - let stacker4_key = &stacker4.signer_private_key; - let stacker4_address = key_to_stacks_addr(stacker4_key); - let stacker4_principal = PrincipalData::from(stacker4_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (stacker1_principal.clone(), 1000), - (stacker2_principal.clone(), 1000), - (stacker3_principal.clone(), 1000), - (stacker4_principal.clone(), 1000), - ], - &[ - stacker1.clone(), - stacker2.clone(), - stacker3.clone(), - stacker4.clone(), - ], - Some(&observer), - ); - - // Stackers will each have voted once while booting to Nakamoto - let stacker1_nonce = 1; - let stacker2_nonce = 1; - let stacker3_nonce = 1; - let stacker4_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let stacker1_index = get_signer_index(&mut peer, latest_block_id, stacker1_address, cycle_id); - let stacker2_index = get_signer_index(&mut peer, latest_block_id, stacker2_address, cycle_id); - let stacker3_index = get_signer_index(&mut peer, latest_block_id, stacker3_address, cycle_id); - let stacker4_index = get_signer_index(&mut peer, latest_block_id, stacker4_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key =
Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // stacker1 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker1_key, - stacker1_nonce, - stacker1_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker2 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker2_key, - stacker2_nonce, - stacker2_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker3 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker3_key, - stacker3_nonce, - stacker3_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker4 casts vote correctly, but it will return an out of voting window error - make_signers_vote_for_aggregate_public_key_value( - stacker4_key, - stacker4_nonce, - stacker4_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 6); - // ignore tenure change tx - // ignore tenure coinbase tx - - // stacker1's vote should succeed - let stacker1_vote_tx = &receipts[2]; - assert_eq!(stacker1_vote_tx.result, Value::okay_true()); - assert_eq!(stacker1_vote_tx.events.len(), 1); - let stacker1_vote_event = &stacker1_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker1_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(1)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker1_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker1_vote_event); - } - - // stacker2's vote should succeed - let stacker2_vote_tx = &receipts[3]; - assert_eq!(stacker2_vote_tx.result, Value::okay_true()); - assert_eq!(stacker2_vote_tx.events.len(), 1); - let stacker2_vote_event = &stacker2_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker2_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker2_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker2_vote_event); - } - - // stacker3's vote should succeed - let stacker3_vote_tx = &receipts[4]; - assert_eq!(stacker3_vote_tx.result, Value::okay_true()); - assert_eq!(stacker3_vote_tx.events.len(), 2); - let stacker3_vote_event = &stacker3_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker3_vote_event { - 
assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(3)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker3_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker3_vote_event); - } - let approve_event = &stacker3_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // stacker4's vote should get an out of voting window error - let stacker4_vote_tx = &receipts[5]; - assert_eq!( - stacker4_vote_tx.result, - Value::err_uint(12) // ERR_OUT_OF_VOTING_WINDOW - ); - assert_eq!(stacker4_vote_tx.events.len(), 0); -} - -/// In this test case, Alice votes in the first block of the first tenure of the prepare phase. -/// Alice can vote successfully. -/// A second vote on the same key and round fails with "duplicate vote" error -#[test] -fn vote_for_aggregate_public_key_in_first_block() { - let stacker_1 = TestStacker::from_seed(&[3, 4]); - let stacker_2 = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - let signer = key_to_stacks_addr(&stacker_1.signer_private_key).to_account_principal(); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![(signer, 1000)], - &[stacker_1.clone(), stacker_2.clone()], - Some(&observer), - ); - - // create vote txs - let signer_nonce = 1; // Start at 1 because the signer has already voted once - let signer_key = &stacker_1.signer_private_key; - let signer_address = key_to_stacks_addr(signer_key); - let signer_principal = PrincipalData::from(signer_address); - let cycle_id = current_reward_cycle; - - let signer_index = get_signer_index(&mut peer, latest_block_id, signer_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_public_key = signers.generate_aggregate_key(cycle_id as u64 + 1); - - let txs = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_key, - signer_nonce, - signer_index, - &aggregate_public_key, - 0, - cycle_id + 1, - ), - // cast the vote twice - make_signers_vote_for_aggregate_public_key( - signer_key, - signer_nonce + 1, - signer_index, - &aggregate_public_key, - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // first vote should succeed - let 
alice_first_vote_tx = &receipts[2]; - assert_eq!(alice_first_vote_tx.result, Value::okay_true()); - - // second vote should fail with duplicate vote error - let alice_second_vote_tx = &receipts[3]; - assert_eq!( - alice_second_vote_tx.result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); -} - -/// In this test case, Alice votes in the first block of the last tenure of the prepare phase. -/// Bob votes in the second block of that tenure. -/// Both can vote successfully. -#[test] -fn vote_for_aggregate_public_key_in_last_block() { - let stacker_1 = TestStacker::from_seed(&[3, 4]); - let stacker_2 = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - let signer_1 = key_to_stacks_addr(&stacker_1.signer_private_key).to_account_principal(); - let signer_2 = key_to_stacks_addr(&stacker_2.signer_private_key).to_account_principal(); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![(signer_1, 1000), (signer_2, 1000)], - &[stacker_1.clone(), stacker_2.clone()], - Some(&observer), - ); - - let mut stacker_1_nonce: u64 = 1; - let dummy_tx_1 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - let dummy_tx_2 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - let dummy_tx_3 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - - let cycle_id: u128 = current_reward_cycle; - let mut signers = TestSigners::default(); - let aggregate_public_key_1 = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_2 = signers.generate_aggregate_key(cycle_id as u64 + 2); - - // create vote txs for alice - let signer_1_nonce = 1; // Start at 1 because the signer has already voted once - let signer_1_key = &stacker_1.signer_private_key; - let signer_1_address = key_to_stacks_addr(signer_1_key); - let signer_1_principal = PrincipalData::from(signer_1_address); - let signer_1_index = get_signer_index(&mut peer, latest_block_id, signer_1_address, cycle_id); - - let txs_block_1 = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce, - signer_1_index, - &aggregate_public_key_1, - 1, - cycle_id + 1, - ), - // cast the vote twice - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce + 1, - signer_1_index, - &aggregate_public_key_1, - 1, - cycle_id + 1, - ), - // cast a vote for old round - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce + 2, - signer_1_index, - &aggregate_public_key_2, - 0, - cycle_id + 1, - ), - ]; - - // create vote txs for bob - let signer_2_nonce = 1; // Start at 1 because the signer has already voted once - let signer_2_key = &stacker_2.signer_private_key; - let signer_2_address = key_to_stacks_addr(signer_2_key); - let signer_2_principal = PrincipalData::from(signer_2_address); - let signer_2_index = get_signer_index(&mut peer, latest_block_id, signer_2_address, cycle_id); - - let txs_block_2 = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_2_key, - signer_2_nonce, - signer_2_index, - &aggregate_public_key_1, - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the last burn block of prepare phase - // - - nakamoto_tenure(&mut peer, &mut test_signers, vec![vec![dummy_tx_1]]); - - // alice votes in first block of 
tenure - // bob votes in second block of tenure - let blocks_and_sizes = - nakamoto_tenure(&mut peer, &mut test_signers, vec![txs_block_1, txs_block_2]); - - // check alice's and bob's txs - let blocks = observer.get_blocks(); - - // alice's block - let block = &blocks[blocks.len() - 2].clone(); - let receipts = &block.receipts; - assert_eq!(receipts.len(), 5); - - // first vote should succeed - let alice_first_vote_tx = &receipts[2]; - assert_eq!(alice_first_vote_tx.result, Value::okay_true()); - - // second vote should fail with duplicate vote error - let alice_second_vote_tx = &receipts[3]; - assert_eq!( - alice_second_vote_tx.result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); - - // third vote should succeed even though it is on an old round - let alice_third_vote_tx = &receipts[4]; - assert_eq!(alice_third_vote_tx.result, Value::okay_true()); - - // bob's block - let block = blocks.last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 1); - - // bob's vote should succeed - let tx1_bob = &receipts[0]; - assert_eq!(tx1_bob.result, Value::okay_true()); -} - -/// In this test case, Alice & Bob both successfully vote in cycle N, then -/// Alice tries to vote for the same signature in cycle N+1, but fails with -/// "duplicate aggregate public key" error. -#[test] -fn vote_for_duplicate_aggregate_public_key() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - 
// ignore tenure change tx - // ignore tenure coinbase tx - - // Both votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - - // Proceed to the next prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key_2 = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for the same key as the last cycle - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Alice casts vote for a new key - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_2.clone(), - 0, - cycle_id + 2, - ), - // Bob casts vote for the same key - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key_2.clone(), - 0, - cycle_id + 2, - ), - ]; - - // Submit the vote in a new block - nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // Check the last 3 tx in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 5); - - // Alice's vote should fail with duplicate aggregate public key error - let alice_vote_tx = &receipts[2]; - assert_eq!( - alice_vote_tx.result, - Value::err_uint(14) // ERR_DUPLICATE_AGGREGATE_PUBLIC_KEY - ); - assert_eq!(alice_vote_tx.events.len(), 0); - - // Both remaining votes should succeed - let alice_vote_tx = &receipts[3]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - let bob_vote_tx = &receipts[4]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); -} - -/// In this test case, Alice & Bob both successfully vote in cycle N, but for -/// different keys. Then in round 1, they both vote for the same key and -/// key selection is successful. In the first cycle, these two rounds happen -/// in the same tenure. In the second cycle, the first round happens in the -/// first tenure of the prepare phase, and the second round happens in the -/// second tenure of the prepare phase. 
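// For orientation (sketch, not part of the diff): the argument order of the
// vote-builder helper used throughout the test below, as it appears at every
// call site in this file. The variable names are the ones the surrounding
// tests define.
let vote_tx = make_signers_vote_for_aggregate_public_key_value(
    alice_key,                    // signer's private key (signs the transaction)
    alice_nonce,                  // account nonce
    alice_index,                  // signer's index in the reward set
    aggregate_public_key.clone(), // compressed aggregate key as a Clarity buff Value
    0,                            // voting round
    cycle_id + 1,                 // reward cycle being voted on
);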
-#[test] -fn vote_for_aggregate_public_key_two_rounds() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_public_key_0_point = signers.generate_aggregate_key(0); - let aggregate_public_key_0 = - Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_1 = - Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key_0.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote for key 1 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key_1.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 1, - ), - // Bob casts a vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 6); - // ignore tenure change tx - // ignore tenure coinbase tx - - // All votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_0.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - 
("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let alice_vote_tx = &receipts[4]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[5]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // Proceed to the next prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, 
Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - // In this cycle, the two rounds are in separate tenures. - - let aggregate_public_key_0_point = signers.generate_aggregate_key(1); - let aggregate_public_key_0 = - Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key_1 = - Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_0.clone(), - 0, - cycle_id + 2, - ), - // Bob casts a vote for key 1 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 2, - bob_index, - aggregate_public_key_1.clone(), - 0, - cycle_id + 2, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Both votes should succeed, but the aggregate key is not approved yet - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_0.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let txs = vec![ - // Alice casts vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 3, - alice_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 
2, - ), - // Bob casts a vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 3, - bob_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 2, - ), - ]; - - // vote again in the next burn block - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice & Bob both successfully vote for the same key in -/// cycle N, then in cycle N + 1, Alice tries to vote before the prepare phase, -/// but fails with a "cycle not set" error. 
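// For context (sketch, not part of the diff): the test below crosses
// reward-cycle phases by mining empty tenures with the nakamoto_tenure helper
// defined at the bottom of this file; each call advances one tenure, as in
// the repeated `let _ = nakamoto_tenure(...)` calls below.
for _ in 0..4 {
    let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new());
}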
-#[test] -fn vote_for_aggregate_public_key_early() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - - // Both votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - - // Proceed to the reward phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - // In this tenure, signers have not been set yet, so the vote should fail - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - ]; - - // vote before the prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 3); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's vote should fail with a "cycle not 
set" error - let alice_vote_tx = &receipts[2]; - assert_eq!( - alice_vote_tx.result, - Value::err_uint(2) // ERR_CYCLE_NOT_SET - ); - assert_eq!(alice_vote_tx.events.len(), 0); - - // Proceed to the prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - ]; - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // This time, the votes should succeed and the key should be approved - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice votes in round 0 and Bob votes in round 1. 
-/// Although they both voted for the same key, the key is not approved. In the -/// next tenure, Bob votes in round 0, and the key is approved. -#[test] -fn vote_for_aggregate_public_key_mixed_rounds() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(0); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote for key 0 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // All votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( 
- contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let txs = vec![ - // Bob casts a vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote for key 0 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote again in the next block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - let bob_vote_tx = &receipts[2]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 0 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // Alice's vote should fail with an "out of voting window" error, since the - // key is already set - let alice_vote_tx = &receipts[3]; - assert_eq!(alice_vote_tx.result, Value::err_uint(12)); // ERR_OUT_OF_VOTING_WINDOW - assert_eq!(alice_vote_tx.events.len(), 0); -} - -// In this test case, Alice & Bob advance through setup & check -// the round info from the very first reward cycle & round. 
-#[test]
-fn test_get_round_info() {
-    // Test setup
-    let alice = TestStacker::from_seed(&[3, 4]);
-    let bob = TestStacker::from_seed(&[5, 6]);
-    let observer = TestEventObserver::new();
-
-    // Alice - Signer 1
-    let alice_key = &alice.signer_private_key;
-    let alice_address = key_to_stacks_addr(alice_key);
-    let alice_principal = PrincipalData::from(alice_address);
-
-    // Bob - Signer 2
-    let bob_key = &bob.signer_private_key;
-    let bob_address = key_to_stacks_addr(bob_key);
-    let bob_principal = PrincipalData::from(bob_address);
-
-    let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test(
-        function_name!(),
-        vec![
-            (alice_principal.clone(), 1000),
-            (bob_principal.clone(), 1000),
-        ],
-        &[alice.clone(), bob.clone()],
-        Some(&observer),
-    );
-
-    // Get the current reward cycle
-    let cycle_id = current_reward_cycle;
-
-    let round_info = get_round_info(&mut peer, latest_block_id, cycle_id, 0)
-        .unwrap()
-        .expect_tuple()
-        .unwrap();
-    let votes_count = round_info.get("votes-count").unwrap();
-    let votes_weight = round_info.get("votes-weight").unwrap();
-
-    assert_eq!(votes_count, &Value::UInt(2));
-    assert_eq!(votes_weight, &Value::UInt(4));
-}
-
-pub fn get_round_info(
-    peer: &mut TestPeer<'_>,
-    latest_block_id: StacksBlockId,
-    reward_cycle: u128,
-    round: u128,
-) -> Option<Value> {
-    let round_tuple = readonly_call(
-        peer,
-        &latest_block_id,
-        "signers-voting".into(),
-        "get-round-info".into(),
-        vec![Value::UInt(reward_cycle), Value::UInt(round)],
-    )
-    .expect_optional()
-    .unwrap();
-    round_tuple
-}
-
-// In this test case, Alice & Bob advance through setup & check
-// the weight threshold info from the very first reward cycle & round.
-#[test]
-fn test_get_threshold_weight() {
-    // Test setup
-    let alice = TestStacker::from_seed(&[3, 4]);
-    let bob = TestStacker::from_seed(&[5, 6]);
-    let observer = TestEventObserver::new();
-
-    // Alice - Signer 1
-    let alice_key = &alice.signer_private_key;
-    let alice_address = key_to_stacks_addr(alice_key);
-    let alice_principal = PrincipalData::from(alice_address);
-
-    // Bob - Signer 2
-    let bob_key = &bob.signer_private_key;
-    let bob_address = key_to_stacks_addr(bob_key);
-    let bob_principal = PrincipalData::from(bob_address);
-
-    let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test(
-        function_name!(),
-        vec![
-            (alice_principal.clone(), 1000),
-            (bob_principal.clone(), 1000),
-        ],
-        &[alice.clone(), bob.clone()],
-        Some(&observer),
-    );
-
-    // Get the current reward cycle
-    let cycle_id = current_reward_cycle;
-
-    // Call get-threshold-weight
-    let threshold_weight: u128 = get_threshold_weight(&mut peer, latest_block_id, cycle_id);
-
-    // Since there are four votes, the threshold weight should be 3 (75% of 4)
-    assert_eq!(threshold_weight, 3);
-}
-
-pub fn get_threshold_weight(
-    peer: &mut TestPeer<'_>,
-    latest_block_id: StacksBlockId,
-    reward_cycle: u128,
-) -> u128 {
-    let threshold_weight = readonly_call(
-        peer,
-        &latest_block_id,
-        "signers-voting".into(),
-        "get-threshold-weight".into(),
-        vec![Value::UInt(reward_cycle)],
-    )
-    .expect_u128()
-    .unwrap();
-    threshold_weight
-}
-
-fn nakamoto_tenure(
-    peer: &mut TestPeer,
-    test_signers: &mut TestSigners,
-    txs_of_blocks: Vec<Vec<StacksTransaction>>,
-) -> Vec<(NakamotoBlock, u64, ExecutionCost)> {
-    let current_height = peer.get_burnchain_view().unwrap().burn_block_height;
-
-    info!("current height: {}", current_height);
-
-    let (burn_ops, mut tenure_change, miner_key) =
peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - - let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer - .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); - let recipient_addr = boot_code_addr(false); - let mut mutable_txs_of_blocks = txs_of_blocks.clone(); - mutable_txs_of_blocks.reverse(); - let blocks_and_sizes = peer.make_nakamoto_tenure( - tenure_change_tx, - coinbase_tx.clone(), - test_signers, - |miner, chainstate, sortdb, blocks| mutable_txs_of_blocks.pop().unwrap_or(vec![]), - ); - info!("tenure length {}", blocks_and_sizes.len()); - blocks_and_sizes -} - -fn make_dummy_tx( - peer: &mut TestPeer, - private_key: &StacksPrivateKey, - nonce: &mut u64, -) -> StacksTransaction { - peer.with_db_state(|sortdb, chainstate, _, _| { - let addr = key_to_stacks_addr(&private_key); - let account = get_account(chainstate, sortdb, &addr); - let recipient_addr = boot_code_addr(false); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - *nonce, - 1, - 1, - &recipient_addr, - ); - *nonce += 1; - Ok(stx_transfer) - }) - .unwrap() -} diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 7a10503b878..58ffdaeb603 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -16,11 +16,12 @@ use std::collections::HashMap; +use clarity::types::chainstate::TenureBlockId; use clarity::vm::database::clarity_store::*; use clarity::vm::database::*; use clarity::vm::types::*; use rusqlite::types::ToSql; -use rusqlite::Row; +use rusqlite::{params, Row}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use crate::burnchains::Address; @@ -413,24 +414,24 @@ impl StacksChainState { } }; - let args: &[&dyn ToSql] = &[ - &block_reward.address.to_string(), - &block_reward.recipient.to_string(), - &block_reward.block_hash, - &block_reward.consensus_hash, - &block_reward.parent_block_hash, - &block_reward.parent_consensus_hash, - &block_reward.coinbase.to_string(), - &db_tx_fees_anchored.to_string(), - &db_tx_fees_streamed.to_string(), - &u64_to_sql(block_reward.burnchain_commit_burn)?, - &u64_to_sql(block_reward.burnchain_sortition_burn)?, - &u64_to_sql(block_reward.stacks_block_height)?, - &true, - &0i64, - &index_block_hash, - &payment_type, - &"0".to_string(), + let args = params![ + block_reward.address.to_string(), + block_reward.recipient.to_string(), + block_reward.block_hash, + block_reward.consensus_hash, + block_reward.parent_block_hash, + block_reward.parent_consensus_hash, + block_reward.coinbase.to_string(), + db_tx_fees_anchored.to_string(), + db_tx_fees_streamed.to_string(), + u64_to_sql(block_reward.burnchain_commit_burn)?, + u64_to_sql(block_reward.burnchain_sortition_burn)?, + u64_to_sql(block_reward.stacks_block_height)?, + true, + 0i64, + index_block_hash, + payment_type, + "0".to_string(), ]; tx.execute( @@ -472,8 +473,8 @@ impl StacksChainState { // trying to store the same matured rewards for a common ancestor block. 
        let cur_rewards = StacksChainState::inner_get_matured_miner_payments(
             tx,
-            parent_block_id,
-            child_block_id,
+            &(*parent_block_id).into(),
+            &(*child_block_id).into(),
         )?;
         if cur_rewards.len() > 0 {
             let mut present = false;
@@ -503,14 +504,14 @@ impl StacksChainState {
                      child_index_block_hash
                 ) VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9)";
-        let args: &[&dyn ToSql] = &[
-            &reward.address.to_string(),
-            &reward.recipient.to_string(),
-            &reward.vtxindex,
-            &reward.coinbase.to_string(),
-            &reward.tx_fees_anchored.to_string(),
-            &reward.tx_fees_streamed_confirmed.to_string(),
-            &reward.tx_fees_streamed_produced.to_string(),
+        let args = params![
+            reward.address.to_string(),
+            reward.recipient.to_string(),
+            reward.vtxindex,
+            reward.coinbase.to_string(),
+            reward.tx_fees_anchored.to_string(),
+            reward.tx_fees_streamed_confirmed.to_string(),
+            reward.tx_fees_streamed_produced.to_string(),
             parent_block_id,
             child_block_id,
         ];
@@ -608,11 +609,11 @@ impl StacksChainState {
     fn inner_get_matured_miner_payments(
         conn: &DBConn,
-        parent_block_id: &StacksBlockId,
-        child_block_id: &StacksBlockId,
+        parent_block_id: &TenureBlockId,
+        child_block_id: &TenureBlockId,
     ) -> Result<Vec<MinerReward>, Error> {
         let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0";
-        let args: &[&dyn ToSql] = &[parent_block_id, child_block_id];
+        let args = params![parent_block_id.0, child_block_id.0];
         let ret: Vec<MinerReward> = query_rows(conn, sql, args).map_err(|e| Error::DBError(e))?;
         Ok(ret)
     }
@@ -621,8 +622,8 @@ impl StacksChainState {
     /// You'd be querying for the `child_block_id`'s reward.
     pub fn get_matured_miner_payment(
         conn: &DBConn,
-        parent_block_id: &StacksBlockId,
-        child_block_id: &StacksBlockId,
+        parent_block_id: &TenureBlockId,
+        child_block_id: &TenureBlockId,
     ) -> Result<Option<MinerReward>, Error> {
         let config = StacksChainState::load_db_config(conn)?;
         let ret = StacksChainState::inner_get_matured_miner_payments(
@@ -643,8 +644,8 @@ impl StacksChainState {
             panic!("FATAL: got two parent rewards");
         };
         Ok(Some(reward))
-        } else if child_block_id
-            == &StacksBlockHeader::make_index_block_hash(
+        } else if child_block_id.0
+            == StacksBlockHeader::make_index_block_hash(
                 &FIRST_BURNCHAIN_CONSENSUS_HASH,
                 &FIRST_STACKS_BLOCK_HASH,
             )
@@ -675,7 +676,7 @@ impl StacksChainState {
     ) -> Result<Vec<MinerPaymentSchedule>, Error> {
         let qry =
             "SELECT * FROM payments WHERE index_block_hash = ?1 ORDER BY vtxindex ASC".to_string();
-        let args: &[&dyn ToSql] = &[index_block_hash];
+        let args = params![index_block_hash];
         let rows = query_rows::<MinerPaymentSchedule, _>(conn, &qry, args).map_err(Error::DBError)?;
         test_debug!("{} rewards in {}", rows.len(), index_block_hash);
@@ -697,9 +698,9 @@ impl StacksChainState {
         };
         let qry = "SELECT * FROM payments WHERE block_hash = ?1 AND consensus_hash = ?2 ORDER BY vtxindex ASC".to_string();
-        let args: &[&dyn ToSql] = &[
-            &ancestor_info.anchored_header.block_hash(),
-            &ancestor_info.consensus_hash,
+        let args = params![
+            ancestor_info.anchored_header.block_hash(),
+            ancestor_info.consensus_hash,
         ];
         let rows = query_rows::<MinerPaymentSchedule, _>(tx, &qry, args).map_err(Error::DBError)?;
         test_debug!(
@@ -733,12 +734,9 @@ impl StacksChainState {
         let qry =
             "SELECT * FROM payments WHERE consensus_hash = ?1 AND block_hash = ?2 AND miner = 1"
                 .to_string();
-        let args = [
-            consensus_hash as &dyn ToSql,
-            stacks_block_hash as &dyn ToSql,
-        ];
+        let args = params![consensus_hash, stacks_block_hash,];
         let mut rows =
-            query_rows::<MinerPaymentSchedule, _>(conn, &qry, &args).map_err(Error::DBError)?;
+            query_rows::<MinerPaymentSchedule, _>(conn, &qry, args).map_err(Error::DBError)?;
         let len =
rows.len(); match len { 0 => { @@ -1130,7 +1128,7 @@ mod test { block_reward.block_hash = new_tip.anchored_header.block_hash(); block_reward.consensus_hash = new_tip.consensus_hash.clone(); - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let tip = StacksChainState::advance_tip( &mut tx, parent_header_info @@ -1186,7 +1184,7 @@ mod test { ); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_0 = StacksChainState::get_tip_ancestor( &mut tx, &StacksHeaderInfo::regtest_genesis(), @@ -1203,7 +1201,7 @@ mod test { ); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_0 = StacksChainState::get_tip_ancestor(&mut tx, &parent_tip, 0).unwrap(); let ancestor_1 = StacksChainState::get_tip_ancestor(&mut tx, &parent_tip, 1).unwrap(); @@ -1216,7 +1214,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_2 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 2).unwrap(); let ancestor_1 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 1).unwrap(); let ancestor_0 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 0).unwrap(); @@ -1262,7 +1260,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let payments_0 = StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, &tip, 0) .unwrap(); @@ -1312,7 +1310,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let payments_0 = StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, &tip, 0) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01b..63c22fafb68 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -34,7 +34,10 @@ use clarity::vm::types::{ TypeSignature, Value, }; use rand::{thread_rng, Rng, RngCore}; -use rusqlite::{Connection, DatabaseName, Error as sqlite_error, OptionalExtension}; +use rusqlite::types::ToSql; +use rusqlite::{ + params, Connection, DatabaseName, Error as sqlite_error, OptionalExtension, Params, +}; use serde::Serialize; use serde_json::json; use stacks_common::bitvec::BitVec; @@ -42,6 +45,7 @@ use stacks_common::codec::{read_next, write_next, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::to_hex; use stacks_common::util::retry::BoundReader; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -766,11 +770,11 @@ impl StacksChainState { for (consensus_hash, block_hash) in blocks.drain(..) 
{ let list_microblock_sql = "SELECT * FROM staging_microblocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 ORDER BY sequence".to_string(); - let list_microblock_args: [&dyn ToSql; 2] = [&block_hash, &consensus_hash]; + let list_microblock_args = params![block_hash, consensus_hash]; let mut microblocks = query_rows::( blocks_conn, &list_microblock_sql, - &list_microblock_args, + list_microblock_args, ) .map_err(Error::DBError)?; @@ -894,8 +898,7 @@ impl StacksChainState { sql_args: P, ) -> Result>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, { let mut stmt = conn .prepare(sql_query) @@ -961,7 +964,7 @@ impl StacksChainState { minimum_block_height: i64, ) -> bool { let sql = "SELECT 1 FROM staging_blocks WHERE microblock_pubkey_hash = ?1 AND height >= ?2"; - let args: &[&dyn ToSql] = &[pubkey_hash, &minimum_block_height]; + let args = params![pubkey_hash, minimum_block_height]; block_conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -977,7 +980,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0".to_string(); - let args: &[&dyn ToSql] = &[&block_hash, &consensus_hash]; + let args = params![block_hash, consensus_hash]; let mut rows = query_rows::(block_conn, &sql, args).map_err(Error::DBError)?; let len = rows.len(); @@ -1006,7 +1009,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE index_block_hash = ?1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args = params![index_block_hash]; query_row::(block_conn, sql, args).map_err(Error::DBError) } @@ -1056,7 +1059,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT microblock_pubkey_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&block_hash, &consensus_hash]; + let args = params![block_hash, consensus_hash]; let rows = query_row_columns::(block_conn, sql, args, "microblock_pubkey_hash") .map_err(Error::DBError)?; match rows.len() { @@ -1111,7 +1114,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = &[&parent_index_block_hash, µblock_hash]; + let args = params![parent_index_block_hash, microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1124,7 +1127,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = &[&index_microblock_hash]; + let args = params![index_microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1329,7 +1332,7 @@ impl StacksChainState { "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC".to_string() }; - let args: &[&dyn ToSql] = &[parent_index_block_hash, &start_seq, &last_seq]; + let args = params![parent_index_block_hash, start_seq, last_seq]; let staging_microblocks = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; @@ -1562,8 +1565,7 @@ impl StacksChainState { // if this block 
has an unprocessed staging parent, then it's not attachable until its parent is. let has_unprocessed_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0 LIMIT 1"; let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; - let has_parent_args: &[&dyn ToSql] = - &[&block.header.parent_block, &parent_consensus_hash]; + let has_parent_args = params![block.header.parent_block, parent_consensus_hash]; let has_unprocessed_parent_rows = query_row_columns::( &tx, has_unprocessed_parent_sql, @@ -1614,24 +1616,25 @@ impl StacksChainState { processed_time, \ download_time) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)"; - let args: &[&dyn ToSql] = &[ - &block_hash, - &block.header.parent_block, - &consensus_hash, - &parent_consensus_hash, - &block.header.parent_microblock, - &block.header.parent_microblock_sequence, - &block.header.microblock_pubkey_hash, - &u64_to_sql(block.header.total_work.work)?, - &attachable, - &0, - &0, - &u64_to_sql(commit_burn)?, - &u64_to_sql(sortition_burn)?, - &index_block_hash, - &u64_to_sql(get_epoch_time_secs())?, - &0, - &u64_to_sql(download_time)?, + + let args = params![ + block_hash, + block.header.parent_block, + consensus_hash, + parent_consensus_hash, + block.header.parent_microblock, + block.header.parent_microblock_sequence, + block.header.microblock_pubkey_hash, + u64_to_sql(block.header.total_work.work)?, + attachable, + 0, + 0, + u64_to_sql(commit_burn)?, + u64_to_sql(sortition_burn)?, + index_block_hash, + u64_to_sql(get_epoch_time_secs())?, + 0, + u64_to_sql(download_time)?, ]; tx.execute(&sql, args) @@ -1687,16 +1690,16 @@ impl StacksChainState { // store microblock metadata let sql = "INSERT OR REPLACE INTO staging_microblocks (anchored_block_hash, consensus_hash, index_block_hash, microblock_hash, parent_hash, index_microblock_hash, sequence, processed, orphaned) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)"; - let args: &[&dyn ToSql] = &[ - &parent_anchored_block_hash, - &parent_consensus_hash, - &index_block_hash, - µblock.block_hash(), - µblock.header.prev_block, - &index_microblock_hash, - µblock.header.sequence, - &0, - &0, + let args = params![ + parent_anchored_block_hash, + parent_consensus_hash, + index_block_hash, + microblock.block_hash(), + microblock.header.prev_block, + index_microblock_hash, + microblock.header.sequence, + 0, + 0, ]; tx.execute(&sql, args) @@ -1706,7 +1709,7 @@ impl StacksChainState { let block_sql = "INSERT OR REPLACE INTO staging_microblocks_data \ (block_hash, block_data) VALUES (?1, ?2)"; - let block_args: &[&dyn ToSql] = &[µblock.block_hash(), µblock_bytes]; + let block_args = params![microblock.block_hash(), microblock_bytes]; tx.execute(&block_sql, block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -1851,7 +1854,7 @@ impl StacksChainState { }; let sql = "SELECT 1 FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&parent_index_block_hash, &parent_microblock_hash]; + let args = params![parent_index_block_hash, parent_microblock_hash]; let res = self .db() .query_row(sql, args, |_r| Ok(())) @@ -2024,10 +2027,7 @@ impl StacksChainState { ); let sql = "SELECT COALESCE(MIN(block_height), 0), COALESCE(MAX(block_height), 0) FROM block_headers WHERE burn_header_height >= ?1 AND burn_header_height < 
?2"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(burn_height_start)?, - &u64_to_sql(burn_height_end)?, - ]; + let args = params![u64_to_sql(burn_height_start)?, u64_to_sql(burn_height_end)?,]; self.db() .query_row(sql, args, |row| { @@ -2075,7 +2075,7 @@ impl StacksChainState { FROM staging_blocks LEFT JOIN staging_microblocks \ ON staging_blocks.parent_microblock_hash = staging_microblocks.microblock_hash \ WHERE staging_blocks.height >= ?1 AND staging_blocks.height <= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let mut stmt = self.db().prepare(sql)?; @@ -2152,7 +2152,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let qry = "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1"; - let args: &[&dyn ToSql] = &[block_hash]; + let args = params![block_hash]; query_rows(conn, qry, args).map_err(|e| e.into()) } @@ -2298,16 +2298,16 @@ impl StacksChainState { ) -> Result<(), Error> { // This block is orphaned let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_block_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash, anchored_block_hash]; // All descendants of this processed block are never attachable. // Indicate this by marking all children as orphaned (but not procesed), across all burnchain forks. let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_consensus_hash = ?1 AND parent_anchored_block_hash = ?2"; - let update_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_children_args = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2317,7 +2317,7 @@ impl StacksChainState { // drop microblocks (this processes them) let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_microblock_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(update_block_sql, update_block_args)?; @@ -2364,7 +2364,7 @@ impl StacksChainState { ); let sql = "DELETE FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; - let args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let args = params![consensus_hash, anchored_block_hash]; tx.execute(sql, args)?; @@ -2388,7 +2388,7 @@ impl StacksChainState { accept: bool, ) -> Result<(), Error> { let sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 0".to_string(); - let args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash]; + let args = params![consensus_hash, anchored_block_hash]; let has_stored_block = 
StacksChainState::has_stored_block( tx, @@ -2402,7 +2402,7 @@ impl StacksChainState { 0 => { // not an error if this block was already orphaned let orphan_sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1".to_string(); - let orphan_args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash]; + let orphan_args = params![consensus_hash, anchored_block_hash]; let orphan_rows = query_rows::(tx, &orphan_sql, orphan_args) .map_err(Error::DBError)?; if orphan_rows.len() == 1 { @@ -2423,17 +2423,14 @@ impl StacksChainState { } }; + let stacks_block_id = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); if !block.processed { if !has_stored_block { if accept { debug!( "Accept block {}/{} as {}", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &anchored_block_hash - ) + consensus_hash, anchored_block_hash, stacks_block_id ); } else { info!("Reject block {}/{}", consensus_hash, anchored_block_hash); @@ -2441,25 +2438,21 @@ impl StacksChainState { } else { debug!( "Already stored block {}/{} ({})", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash) + consensus_hash, anchored_block_hash, stacks_block_id ); } } else { debug!( "Already processed block {}/{} ({})", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash) + consensus_hash, anchored_block_hash, stacks_block_id ); } let update_sql = "UPDATE staging_blocks SET processed = 1, processed_time = ?1 WHERE consensus_hash = ?2 AND anchored_block_hash = ?3".to_string(); - let update_args: &[&dyn ToSql] = &[ - &u64_to_sql(get_epoch_time_secs())?, - &consensus_hash, - &anchored_block_hash, + let update_args = params![ + u64_to_sql(get_epoch_time_secs())?, + consensus_hash, + anchored_block_hash, ]; tx.execute(&update_sql, update_args) @@ -2524,11 +2517,11 @@ impl StacksChainState { &index_block_hash ); let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_block_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2545,7 +2538,7 @@ impl StacksChainState { &index_block_hash ); let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(&update_block_sql, update_block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2581,7 +2574,7 @@ impl StacksChainState { ) -> Result<(), Error> { // find offending sequence let seq_sql = "SELECT sequence FROM staging_microblocks WHERE consensus_hash = ?1 AND 
anchored_block_hash = ?2 AND microblock_hash = ?3 AND processed = 0 AND orphaned = 0".to_string(); - let seq_args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash, &invalid_block_hash]; + let seq_args = params![consensus_hash, anchored_block_hash, invalid_block_hash]; let seq = match query_int::<_>(tx, &seq_sql, seq_args) { Ok(seq) => seq, Err(e) => match e { @@ -2602,7 +2595,7 @@ impl StacksChainState { // drop staging children at and beyond the invalid block let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE anchored_block_hash = ?1 AND sequence >= ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = &[&anchored_block_hash, &seq]; + let update_microblock_children_args = params![anchored_block_hash, seq]; tx.execute( &update_microblock_children_sql, @@ -2612,7 +2605,7 @@ impl StacksChainState { // find all orphaned microblocks hashes, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE anchored_block_hash = ?1 AND sequence >= ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[&anchored_block_hash, &seq]; + let find_orphaned_microblocks_args = params![anchored_block_hash, seq]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2667,7 +2660,7 @@ impl StacksChainState { test_debug!("Set {}-{} processed", &parent_index_hash, &mblock_hash); // confirm this microblock - let args: &[&dyn ToSql] = &[&parent_consensus_hash, &parent_block_hash, &mblock_hash]; + let args = params![parent_consensus_hash, parent_block_hash, mblock_hash]; tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2738,7 +2731,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result { let sql = "SELECT 1 FROM staging_microblocks WHERE index_microblock_hash = ?1 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[index_microblock_hash]; + let args = params![index_microblock_hash]; let res = conn .query_row(&sql, args, |_r| Ok(())) .optional() @@ -2832,10 +2825,10 @@ impl StacksChainState { "SELECT {},{} FROM staging_blocks WHERE index_block_hash = ?1", consensus_hash_col, anchored_block_col ); - let args = [index_block_hash as &dyn ToSql]; + let args = params![index_block_hash]; blocks_db - .query_row(&sql, &args, |row| { + .query_row(&sql, args, |row| { let anchored_block_hash = BlockHeaderHash::from_column(row, anchored_block_col) .expect("Expected anchored_block_hash - database corrupted"); let consensus_hash = ConsensusHash::from_column(row, consensus_hash_col) @@ -2884,11 +2877,8 @@ impl StacksChainState { staging_microblocks JOIN staging_microblocks_data \ ON staging_microblocks.microblock_hash = staging_microblocks_data.block_hash \ WHERE staging_microblocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2"; - let args = [ - parent_index_block_hash as &dyn ToSql, - microblock_hash as &dyn ToSql, - ]; - query_row(blocks_conn, sql, &args).map_err(Error::DBError) + let args = params![parent_index_block_hash, microblock_hash,]; + query_row(blocks_conn, sql, args).map_err(Error::DBError) } /// Load up the metadata on a microblock stream (but don't get the data itself) @@ -2900,9 +2890,9 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 ORDER BY sequence" .to_string(); - let args = [parent_index_block_hash as &dyn ToSql]; + let args = params![parent_index_block_hash]; let 
microblock_info = - query_rows::(blocks_conn, &sql, &args).map_err(Error::DBError)?; + query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; Ok(microblock_info) } @@ -2942,7 +2932,7 @@ impl StacksChainState { ) -> Result { let sql = "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1 AND arrival_time >= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(deadline)?]; + let args = params![u64_to_sql(height)?, u64_to_sql(deadline)?]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -2958,7 +2948,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND arrival_time >= ?1 ORDER BY height DESC LIMIT 1"; - let res = query_row(conn, sql, &[u64_to_sql(deadline)?])?; + let res = query_row(conn, sql, params![u64_to_sql(deadline)?])?; Ok(res) } @@ -3172,7 +3162,7 @@ impl StacksChainState { parent_block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM epoch_transitions WHERE block_id = ?1"; - let args: &[&dyn ToSql] = &[&StacksBlockHeader::make_index_block_hash( + let args = params![StacksBlockHeader::make_index_block_hash( parent_consensus_hash, parent_block_hash, )]; @@ -3836,7 +3826,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT processed_time - arrival_time FROM staging_blocks WHERE processed = 1 AND height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3849,7 +3839,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT download_time FROM staging_blocks WHERE height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3931,9 +3921,9 @@ impl StacksChainState { // not the first-ever block. Does this connect to a previously-accepted // block in the headers database? let hdr_sql = "SELECT * FROM block_headers WHERE block_hash = ?1 AND consensus_hash = ?2".to_string(); - let hdr_args: &[&dyn ToSql] = &[ - &candidate.parent_anchored_block_hash, - &candidate.parent_consensus_hash, + let hdr_args = params![ + candidate.parent_anchored_block_hash, + candidate.parent_consensus_hash, ]; let hdr_row = query_row_panic::( blocks_tx, @@ -4086,7 +4076,10 @@ impl StacksChainState { let mut current_epoch = stacks_parent_epoch; while current_epoch != sortition_epoch.epoch_id { applied = true; - info!("Applying epoch transition"; "new_epoch_id" => %sortition_epoch.epoch_id, "old_epoch_id" => %current_epoch); + info!("Applying epoch transition"; + "new_epoch_id" => %sortition_epoch.epoch_id, + "old_epoch_id" => %current_epoch + ); // this assertion failing means that the _parent_ block was invalid: this is bad and should panic. 
assert!(current_epoch < sortition_epoch.epoch_id, "The SortitionDB believes the epoch is earlier than this Stacks block's parent: sortition db epoch = {}, current epoch = {}", sortition_epoch.epoch_id, current_epoch); // time for special cases: @@ -4132,6 +4125,9 @@ impl StacksChainState { Ok((applied, receipts)) } + // TODO: add tests from mutation testing results #4856 + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Process any Stacking-related bitcoin operations /// that haven't been processed in this Stacks fork yet. pub fn process_stacking_ops( @@ -4196,7 +4192,14 @@ impl StacksChainState { "burn_block" => %burn_header_hash, "contract_call_ecode" => %resp.data); } else { - debug!("Processed StackStx burnchain op"; "amount_ustx" => stacked_ustx, "num_cycles" => num_cycles, "burn_block_height" => block_height, "sender" => %sender, "reward_addr" => %reward_addr, "txid" => %txid); + debug!("Processed StackStx burnchain op"; + "amount_ustx" => stacked_ustx, + "num_cycles" => num_cycles, + "burn_block_height" => block_height, + "sender" => %sender, + "reward_addr" => %reward_addr, + "txid" => %txid + ); } let mut execution_cost = clarity_tx.cost_so_far(); execution_cost @@ -4229,7 +4232,8 @@ impl StacksChainState { info!("StackStx burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_block_hash" => %burn_header_hash + ); } }; } @@ -4237,6 +4241,8 @@ impl StacksChainState { all_receipts } + // TODO: add tests from mutation testing results #4857 + #[cfg_attr(test, mutants::skip)] pub fn collect_pox_4_stacking_args(op: &StackStxOp) -> Result, String> { let signer_key = match op.signer_key { Some(signer_key) => match Value::buff_from(signer_key.as_bytes().to_vec()) { @@ -4313,9 +4319,10 @@ impl StacksChainState { } Err(e) => { info!("TransferStx burn op processing error."; - "error" => ?e, - "txid" => %txid, - "burn_block" => %burn_header_hash); + "error" => ?e, + "txid" => %txid, + "burn_block_hash" => %burn_header_hash + ); None } } @@ -4389,13 +4396,22 @@ impl StacksChainState { if let Value::Response(ref resp) = value { if !resp.committed { info!("DelegateStx burn op rejected by PoX contract."; - "txid" => %txid, - "burn_block" => %burn_header_hash, - "contract_call_ecode" => %resp.data); + "txid" => %txid, + "burn_block_hash" => %burn_header_hash, + "contract_call_ecode" => %resp.data); } else { let reward_addr_fmt = format!("{:?}", reward_addr); let delegate_to_fmt = format!("{:?}", delegate_to); - info!("Processed DelegateStx burnchain op"; "resp" => %resp.data, "amount_ustx" => delegated_ustx, "delegate_to" => delegate_to_fmt, "until_burn_height" => until_burn_height, "burn_block_height" => block_height, "sender" => %sender, "reward_addr" => reward_addr_fmt, "txid" => %txid); + info!("Processed DelegateStx burnchain op"; + "resp" => %resp.data, + "amount_ustx" => delegated_ustx, + "delegate_to" => delegate_to_fmt, + "until_burn_height" => until_burn_height, + "burn_block_height" => block_height, + "sender" => %sender, + "reward_addr" => reward_addr_fmt, + "txid" => %txid + ); } let mut execution_cost = clarity_tx.cost_so_far(); execution_cost @@ -4428,7 +4444,7 @@ impl StacksChainState { info!("DelegateStx burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_header_hash" => %burn_header_hash); } }; } @@ -4487,7 +4503,7 @@ impl StacksChainState { if !resp.committed { info!("VoteForAggregateKey burn op rejected by 
signers-voting contract."; "txid" => %txid, - "burn_block" => %burn_header_hash, + "burn_block_hash" => %burn_header_hash, "contract_call_ecode" => %resp.data); } else { let aggregate_key_fmt = format!("{:?}", aggregate_key.to_hex()); @@ -4536,7 +4552,7 @@ impl StacksChainState { info!("VoteForAggregateKey burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_block_hash" => %burn_header_hash); } }; } @@ -4584,10 +4600,10 @@ impl StacksChainState { // strictly speaking this check is defensive. It will never be the case // that a `miner_reward` has a `recipient_contract` that is `Some(..)` // unless the block was mined in Epoch 2.1. But you can't be too - // careful... + // careful... if evaluated_epoch >= StacksEpochId::Epoch21 { // in 2.1 or later, the coinbase may optionally specify a contract into - // which the tokens get sent. If this is not given, then they are sent + // which the tokens get sent. If this is not given, then they are sent // to the miner address. miner_reward.recipient.clone() } @@ -6021,6 +6037,8 @@ impl StacksChainState { Ok(next_microblocks) } + // TODO: add tests from mutation testing results #4858 + #[cfg_attr(test, mutants::skip)] /// Find and process the next staging block. /// Return the next chain tip if we processed this block, or None if we couldn't. /// Return a poison microblock transaction payload if the microblock stream contains a @@ -6525,7 +6543,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; + let args = params![consensus_hash, block_bhh]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6534,7 +6552,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; + let args = params![consensus_hash, block_bhh]; let Some(staging_block): Option = query_row(&self.db(), sql, args).map_err(Error::DBError)? 
else { @@ -6547,7 +6565,7 @@ impl StacksChainState { pub fn get_stacks_chain_tips_at_height(&self, height: u64) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; query_rows(&self.db(), sql, args).map_err(Error::DBError) } @@ -6557,9 +6575,9 @@ impl StacksChainState { staging_block: &StagingBlock, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[ - &staging_block.parent_consensus_hash, - &staging_block.parent_anchored_block_hash, + let args = params![ + staging_block.parent_consensus_hash, + staging_block.parent_anchored_block_hash, ]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6571,7 +6589,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT height FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; + let args = params![consensus_hash, block_hash]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6650,7 +6668,9 @@ impl StacksChainState { // 1: must parse (done) // 2: it must be validly signed. - StacksChainState::process_transaction_precheck(&chainstate_config, &tx) + let epoch = clarity_connection.get_epoch().clone(); + + StacksChainState::process_transaction_precheck(&chainstate_config, &tx, epoch) .map_err(|e| MemPoolRejection::FailedToValidate(e))?; // 3: it must pay a tx fee @@ -6663,7 +6683,14 @@ impl StacksChainState { )); } - // 4: the account nonces must be correct + // 4: check if transaction is valid in the current epoch + if !StacksBlock::validate_transaction_static_epoch(tx, epoch) { + return Err(MemPoolRejection::Other( + "Transaction is not supported in this epoch".to_string(), + )); + } + + // 5: the account nonces must be correct let (origin, payer) = match StacksChainState::check_transaction_nonces(clarity_connection, &tx, true) { Ok(x) => x, @@ -6725,7 +6752,7 @@ impl StacksChainState { }, )?; - // 5: the paying account must have enough funds + // 6: the paying account must have enough funds if !payer.stx_balance.can_transfer_at_burn_block( u128::from(fee), block_height, @@ -6751,7 +6778,7 @@ impl StacksChainState { } } - // 6: payload-specific checks + // 7: payload-specific checks match &tx.payload { TransactionPayload::TokenTransfer(addr, amount, _memo) => { // version byte matches? 
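// --- Illustrative aside, not part of the patch: the hunk above threads the
// current Clarity epoch through mempool admission and inserts a new check #4,
// so a payload that is only defined in a later epoch is rejected up front
// instead of failing deeper in processing. A self-contained sketch of that
// ordering follows; `Epoch`, `Tx`, and `will_admit` are hypothetical
// stand-ins invented here, not stacks-core APIs.
#[derive(PartialEq, PartialOrd, Clone, Copy)]
enum Epoch {
    Epoch20,
    Epoch25,
    Epoch30,
}

struct Tx {
    fee: u64,
    nonce: u64,
    // earliest epoch in which this payload kind is defined
    min_epoch: Epoch,
}

fn will_admit(
    tx: &Tx,
    epoch: Epoch,
    fee_floor: u64,
    expected_nonce: u64,
    balance: u128,
) -> Result<(), String> {
    // 3: it must pay a tx fee
    if tx.fee < fee_floor {
        return Err("fee too low".into());
    }
    // 4: check if transaction is valid in the current epoch
    if tx.min_epoch > epoch {
        return Err("Transaction is not supported in this epoch".into());
    }
    // 5: the account nonces must be correct
    if tx.nonce != expected_nonce {
        return Err("bad nonce".into());
    }
    // 6: the paying account must have enough funds
    if balance < u128::from(tx.fee) {
        return Err("not enough funds".into());
    }
    Ok(())
}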
@@ -6854,7 +6881,7 @@ impl StacksChainState { } if let Some(_version) = version_opt.as_ref() { - if clarity_connection.get_epoch() < StacksEpochId::Epoch21 { + if epoch < StacksEpochId::Epoch21 { return Err(MemPoolRejection::Other( "Versioned smart contract transactions are not supported in this epoch" .to_string(), @@ -10263,7 +10290,7 @@ pub mod test { ); let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -10515,7 +10542,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11071,7 +11098,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11236,7 +11263,7 @@ pub mod test { let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); @@ -11394,7 +11421,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11917,9 +11944,12 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); + let iconn = sortdb + .index_handle_at_block(peer.chainstate(), &tip_hash) + .unwrap(); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&iconn, &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 0079f3d7d5c..98f41bf9c7d 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -139,29 +139,29 @@ impl StacksChainState { assert!(block_height < (i64::MAX as u64)); - let args: &[&dyn ToSql] = &[ - &header.version, - &total_burn_str, - &total_work_str, - &header.proof, - &header.parent_block, - &header.parent_microblock, - &header.parent_microblock_sequence, - &header.tx_merkle_root, - &header.state_index_root, - &header.microblock_pubkey_hash, - &block_hash, - &index_block_hash, - &consensus_hash, - &burn_header_hash, - &(burn_header_height as i64), - &(burn_header_timestamp as i64), - &(block_height as i64), - &index_root, + let args = params![ + header.version, + total_burn_str, + total_work_str, + header.proof, + header.parent_block, + header.parent_microblock, + header.parent_microblock_sequence, + header.tx_merkle_root, + header.state_index_root, + header.microblock_pubkey_hash, + block_hash, + index_block_hash, + consensus_hash, + burn_header_hash, + (burn_header_height as i64), + (burn_header_timestamp as i64), + (block_height as i64), + index_root, anchored_block_cost, - &block_size_str, + block_size_str, parent_id, - &u64_to_sql(affirmation_weight)?, + u64_to_sql(affirmation_weight)?, ]; tx.execute("INSERT INTO 
block_headers \ @@ -209,7 +209,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_hash]; + let args = params![consensus_hash, block_hash]; match conn.query_row(sql, args, |_| Ok(true)) { Ok(_) => Ok(true), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), @@ -225,7 +225,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_hash]; + let args = params![consensus_hash, block_hash]; query_row_panic(conn, sql, args, || { "FATAL: multiple rows for the same block hash".to_string() }) @@ -319,7 +319,7 @@ impl StacksChainState { pub fn get_genesis_header_info(conn: &Connection) -> Result { // by construction, only one block can have height 0 in this DB let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_height = 0"; - let args: &[&dyn ToSql] = &[&FIRST_BURNCHAIN_CONSENSUS_HASH]; + let args = params![FIRST_BURNCHAIN_CONSENSUS_HASH]; let row_opt = query_row(conn, sql, args)?; Ok(row_opt.expect("BUG: no genesis header info")) } @@ -330,7 +330,7 @@ impl StacksChainState { block_id: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[block_id]; + let args = params![block_id]; let mut rows = query_row_columns::(conn, sql, args, "parent_block_id")?; Ok(rows.pop()) } @@ -338,7 +338,7 @@ impl StacksChainState { /// Is this block present and processed? pub fn has_stacks_block(conn: &Connection, block_id: &StacksBlockId) -> Result { let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[block_id]; + let args = params![block_id]; Ok(conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -383,7 +383,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM block_headers WHERE block_height = ?1 AND affirmation_weight = ?2 ORDER BY burn_header_height DESC"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(affirmation_weight)?]; + let args = params![u64_to_sql(height)?, u64_to_sql(affirmation_weight)?]; query_rows(conn, qry, args).map_err(|e| e.into()) } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 374fc11ae13..356b117b8bb 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -36,11 +36,12 @@ use clarity::vm::types::TupleData; use clarity::vm::{SymbolicExpression, Value}; use lazy_static::lazy_static; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use serde::de::Error as de_Error; use serde::Deserialize; use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util; use stacks_common::util::hash::{hex_bytes, to_hex}; @@ -53,7 +54,8 @@ use crate::chainstate::burn::operations::{ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, - 
NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, + NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, + NAKAMOTO_CHAINSTATE_SCHEMA_3, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -196,6 +198,9 @@ pub struct StacksHeaderInfo { pub burn_header_timestamp: u64, /// Size of the block corresponding to `anchored_header` in bytes pub anchored_block_size: u64, + /// The burnchain tip that is passed to Clarity while processing this block. + /// This should always be `Some()` for Nakamoto blocks and `None` for 2.x blocks + pub burn_view: Option, } #[derive(Debug, Clone, PartialEq)] @@ -287,23 +292,20 @@ pub struct DBConfig { impl DBConfig { pub fn supports_epoch(&self, epoch_id: StacksEpochId) -> bool { + let version_u32: u32 = self.version.parse().unwrap_or_else(|e| { + error!("Failed to parse Stacks chainstate version as u32: {e}"); + 0 + }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => { - self.version == "1" - || self.version == "2" - || self.version == "3" - || self.version == "4" - } - StacksEpochId::Epoch2_05 => { - self.version == "2" || self.version == "3" || self.version == "4" - } - StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch25 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch30 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 6, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 6, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 6, } } } @@ -371,6 +373,7 @@ impl StacksHeaderInfo { consensus_hash: ConsensusHash::empty(), burn_header_timestamp: 0, anchored_block_size: 0, + burn_view: None, } } @@ -390,6 +393,7 @@ impl StacksHeaderInfo { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), burn_header_timestamp: first_burnchain_block_timestamp, anchored_block_size: 0, + burn_view: None, } } @@ -436,15 +440,21 @@ impl FromRow for StacksHeaderInfo { .parse::() .map_err(|_| db_error::ParseError)?; + let header_type: HeaderTypeNames = row + .get("header_type") + .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); let stacks_header: StacksBlockHeaderTypes = { - let header_type: HeaderTypeNames = row - .get("header_type") - .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); match header_type { HeaderTypeNames::Epoch2 => StacksBlockHeader::from_row(row)?.into(), HeaderTypeNames::Nakamoto => NakamotoBlockHeader::from_row(row)?.into(), } }; + let burn_view = { + match header_type { + HeaderTypeNames::Epoch2 => None, + HeaderTypeNames::Nakamoto => Some(ConsensusHash::from_column(row, "burn_view")?), + } + }; if block_height != stacks_header.height() { return Err(db_error::ParseError); @@ -460,6 +470,7 @@ impl FromRow for StacksHeaderInfo { burn_header_height: burn_header_height as u32, burn_header_timestamp, anchored_block_size, + burn_view, }) } } @@ -640,7 +651,7 @@ impl<'a> 
ChainstateTx<'a> { let txid = tx_event.transaction.txid(); let tx_hex = tx_event.transaction.serialize_to_dbstring(); let result = tx_event.result.to_string(); - let params: &[&dyn ToSql] = &[&txid, block_id, &tx_hex, &result]; + let params = params![txid, block_id, tx_hex, result]; if let Err(e) = self.tx.tx().execute(insert, params) { warn!("Failed to log TX: {}", e); } @@ -668,7 +679,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "4"; +pub const CHAINSTATE_VERSION: &'static str = "6"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -994,11 +1005,7 @@ impl StacksChainState { } tx.execute( "INSERT INTO db_config (version,mainnet,chain_id) VALUES (?1,?2,?3)", - &[ - &"1".to_string(), - &(if mainnet { 1 } else { 0 }) as &dyn ToSql, - &chain_id as &dyn ToSql, - ], + params!["1".to_string(), (if mainnet { 1 } else { 0 }), chain_id,], )?; if migrate { @@ -1079,6 +1086,20 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "4" => { + // migrate to nakamoto 2 + info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; + } + } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", @@ -1627,7 +1648,7 @@ impl StacksChainState { { // add a block header entry for the boot code - let mut tx = chainstate.index_tx_begin()?; + let mut tx = chainstate.index_tx_begin(); let parent_hash = StacksBlockId::sentinel(); let first_index_hash = StacksBlockHeader::make_index_block_hash( &FIRST_BURNCHAIN_CONSENSUS_HASH, @@ -1846,12 +1867,12 @@ impl StacksChainState { /// Begin a transaction against the (indexed) stacks chainstate DB. /// Does not create a Clarity instance. 
- pub fn index_tx_begin<'a>(&'a mut self) -> Result, Error> { - Ok(StacksDBTx::new(&mut self.state_index, ())) + pub fn index_tx_begin<'a>(&'a mut self) -> StacksDBTx<'a> { + StacksDBTx::new(&mut self.state_index, ()) } - pub fn index_conn<'a>(&'a self) -> Result, Error> { - Ok(StacksDBConn::new(&self.state_index, ())) + pub fn index_conn<'a>(&'a self) -> StacksDBConn<'a> { + StacksDBConn::new(&self.state_index, ()) } /// Begin a transaction against the underlying DB @@ -1887,7 +1908,7 @@ impl StacksChainState { ) -> Value { let result = self.clarity_state.eval_read_only( parent_id_bhh, - &HeadersDBConn(self.state_index.sqlite_conn()), + &HeadersDBConn(StacksDBConn::new(&self.state_index, ())), burn_dbconn, contract, code, @@ -1906,7 +1927,7 @@ impl StacksChainState { ) -> Result { self.clarity_state.eval_read_only( parent_id_bhh, - &HeadersDBConn(self.state_index.sqlite_conn()), + &HeadersDBConn(StacksDBConn::new(&self.state_index, ())), burn_dbconn, contract, code, @@ -1925,7 +1946,7 @@ impl StacksChainState { function: &str, args: &[Value], ) -> Result { - let headers_db = HeadersDBConn(self.state_index.sqlite_conn()); + let headers_db = HeadersDBConn(StacksDBConn::new(&self.state_index, ())); let mut conn = self.clarity_state.read_only_connection_checked( parent_id_bhh, &headers_db, @@ -2440,7 +2461,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT txids FROM burnchain_txids WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let txids = conn .query_row(sql, args, |r| { @@ -2520,7 +2541,7 @@ impl StacksChainState { let txids_json = serde_json::to_string(&txids).expect("FATAL: could not serialize Vec"); let sql = "INSERT INTO burnchain_txids (index_block_hash, txids) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[index_block_hash, &txids_json]; + let args = params![index_block_hash, &txids_json]; tx.execute(sql, args)?; Ok(()) } @@ -2594,6 +2615,7 @@ impl StacksChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: anchor_block_size, + burn_view: None, }; StacksChainState::insert_stacks_block_header( @@ -2649,7 +2671,7 @@ impl StacksChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args = params![&index_block_hash]; headers_tx.deref_mut().execute(sql, args)?; } @@ -2926,4 +2948,23 @@ pub mod test { MAINNET_2_0_GENESIS_ROOT_HASH ); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + let db = DBConfig { + version: CHAINSTATE_VERSION.to_string(), + mainnet: true, + chain_id: CHAIN_ID_MAINNET, + }; + assert!(db.supports_epoch(StacksEpochId::latest())); + } + + #[test] + fn test_sqlite_version() { + let chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + assert_eq!( + query_row(chainstate.db(), "SELECT sqlite_version()", NO_PARAMS).unwrap(), + Some("3.45.0".to_string()) + ); + } } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 9297252d155..35ba5326678 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -521,8 +521,18 @@ impl StacksChainState { pub fn process_transaction_precheck( config: &DBConfig, tx: &StacksTransaction, + epoch_id: 
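`index_tx_begin()` and `index_conn()` above had no failure path, so dropping the `Result` wrapper deletes dead error handling at every call site (the earlier `index_tx_begin()?` becoming a plain call is the same change seen from the other side). A minimal sketch of that refactor with stand-in types for `StacksDBTx`:

```rust
struct Index;

struct IndexTx<'a> {
    _index: &'a mut Index,
}

impl Index {
    // Before: fn tx_begin(&mut self) -> Result<IndexTx<'_>, Error>
    // After: the constructor cannot fail, so it returns the value directly.
    fn tx_begin(&mut self) -> IndexTx<'_> {
        IndexTx { _index: self }
    }
}

fn main() {
    let mut index = Index;
    // Call sites change from `index.tx_begin()?` to a plain call.
    let _tx = index.tx_begin();
}
```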
StacksEpochId, ) -> Result<(), Error> { // valid auth? + if !tx.auth.is_supported_in_epoch(epoch_id) { + let msg = format!( + "Invalid tx {}: authentication mode not supported in Epoch {epoch_id}", + tx.txid() + ); + warn!("{msg}"); + + return Err(Error::InvalidStacksTransaction(msg, false)); + } tx.verify().map_err(Error::NetError)?; // destined for us? @@ -970,14 +980,14 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } if *addr == origin_account.principal { let msg = format!("Invalid TokenTransfer: address tried to send to itself"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1088,7 +1098,7 @@ impl StacksChainState { if epoch_id >= StacksEpochId::Epoch21 { // in 2.1 and later, this is a permitted runtime error. take the // fee from the payer and keep the tx. - warn!("Contract-call encountered an analysis error at runtime"; + info!("Contract-call encountered an analysis error at runtime"; "txid" => %tx.txid(), "origin" => %origin_account.principal, "origin_nonce" => %origin_account.nonce, @@ -1163,7 +1173,7 @@ impl StacksChainState { // (because this can be checked statically by the miner before mining the block). if StacksChainState::get_contract(clarity_tx, &contract_id)?.is_some() { let msg = format!("Duplicate contract '{}'", &contract_id); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1225,7 +1235,7 @@ impl StacksChainState { .sub(&cost_before) .expect("BUG: total block cost decreased"); - warn!( + info!( "Runtime error in contract analysis for {}: {:?}", &contract_id, &other_error; "txid" => %tx.txid(), @@ -1329,7 +1339,7 @@ impl StacksChainState { if epoch_id >= StacksEpochId::Epoch21 { // in 2.1 and later, this is a permitted runtime error. take the // fee from the payer and keep the tx. - warn!("Smart-contract encountered an analysis error at runtime"; + info!("Smart-contract encountered an analysis error at runtime"; "txid" => %tx.txid(), "contract" => %contract_id, "code" => %contract_code_str, @@ -1380,7 +1390,7 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: PoisonMicroblock transactions do not support post-conditions"); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1412,7 +1422,7 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TenureChange transactions do not support post-conditions"); - warn!("{msg}"); + info!("{msg}"); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1467,7 +1477,7 @@ impl StacksChainState { debug!("Process transaction {} ({})", tx.txid(), tx.payload.name()); let epoch = clarity_block.get_epoch(); - StacksChainState::process_transaction_precheck(&clarity_block.config, tx)?; + StacksChainState::process_transaction_precheck(&clarity_block.config, tx, epoch)?; // what version of Clarity did the transaction caller want? And, is it valid now? 
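The new `epoch_id` argument to `process_transaction_precheck()` lets the node reject transactions whose authentication mode has not activated yet, before spending any effort on signature verification. A stripped-down sketch of that gate; `AuthMode`, the epoch numbering, and the activation rule are assumptions for illustration, not the real `is_supported_in_epoch` logic:

```rust
#[derive(Debug)]
enum AuthMode {
    Standard,
    OrderIndependentMultisig,
}

#[derive(PartialEq, PartialOrd, Clone, Copy)]
enum Epoch {
    Epoch25,
    Epoch30,
}

fn precheck_auth(mode: &AuthMode, epoch: Epoch) -> Result<(), String> {
    let supported = match mode {
        AuthMode::Standard => true,
        // Assumption for illustration: this mode activates in epoch 3.0.
        AuthMode::OrderIndependentMultisig => epoch >= Epoch::Epoch30,
    };
    if supported {
        Ok(())
    } else {
        Err(format!("auth mode {mode:?} not supported in this epoch"))
    }
}

fn main() {
    assert!(precheck_auth(&AuthMode::Standard, Epoch::Epoch25).is_ok());
    assert!(precheck_auth(&AuthMode::OrderIndependentMultisig, Epoch::Epoch25).is_err());
}
```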
let clarity_version = StacksChainState::get_tx_clarity_version(clarity_block, tx)?; @@ -1475,7 +1485,7 @@ impl StacksChainState { // requires 2.1 and higher if clarity_block.get_epoch() < StacksEpochId::Epoch21 { let msg = format!("Invalid transaction {}: asks for Clarity2, but not in Stacks epoch 2.1 or later", tx.txid()); - warn!("{}", &msg); + info!("{}", &msg); return Err(Error::InvalidStacksTransaction(msg, false)); } } @@ -1716,7 +1726,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -1946,7 +1956,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2060,7 +2070,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2151,7 +2161,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2214,7 +2224,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2322,7 +2332,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2413,7 +2423,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2532,7 +2542,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2646,7 +2656,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2785,7 +2795,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2895,7 +2905,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3020,7 +3030,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3130,7 +3140,7 @@ pub mod test { for (dbi, burn_db) 
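The mechanical `burn_db` to `*burn_db` change repeated through these tests is a type-level fix: iterating `&[&dyn BurnStateDB]` yields a double reference. One plausible reproduction, assuming the callee is generic over the trait (a plain `&dyn` parameter would auto-deref); names are illustrative:

```rust
trait BurnStateDB {
    fn name(&self) -> &'static str;
}

struct MockDB;
impl BurnStateDB for MockDB {
    fn name(&self) -> &'static str {
        "mock"
    }
}

// Generic callee: passing `&&dyn BurnStateDB` would infer
// B = &dyn BurnStateDB, which does not itself implement the trait,
// so that call would not compile.
fn block_begin<B: BurnStateDB + ?Sized>(db: &B) {
    println!("begin against {}", db.name());
}

fn main() {
    let all_burn_dbs: &[&dyn BurnStateDB] = &[&MockDB];
    for burn_db in all_burn_dbs.iter() {
        // `burn_db` is `&&dyn BurnStateDB`; one deref gives
        // `&dyn BurnStateDB`, so B = dyn BurnStateDB.
        block_begin(*burn_db);
    }
}
```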
in PRE_21_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3343,7 +3353,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3886,7 +3896,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is instantiated, so as-contract works in 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4609,7 +4619,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is installed so as-contract will work in epoch 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4984,7 +4994,7 @@ pub mod test { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8116,7 +8126,7 @@ pub mod test { // which leads to an InvalidFee error for (dbi, burn_db) in PRE_21_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8273,7 +8283,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8394,7 +8404,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8488,7 +8498,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8620,6 +8630,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } @@ -8842,6 +8860,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } @@ -9038,7 +9064,7 @@ pub mod test { (as-contract (stx-transfer? amount tx-sender recipient)) ) - + (stx-transfer? u500000000 tx-sender (as-contract tx-sender)) "#; @@ -9203,7 +9229,7 @@ pub mod test { (as-contract (stx-transfer? amount tx-sender recipient)) ) - + (stx-transfer? 
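The `MockedBurnDB` impls above gain `get_tip_burn_block_height()` and `get_tip_sortition_id()`, answering with height 0 and an all-zero `SortitionId` so a genesis-only mock can still serve tip queries. A self-contained sketch of that shape (the trait is a simplified stand-in for `BurnStateDB`):

```rust
struct SortitionId([u8; 32]);

trait BurnStateDB {
    /// Height of the burnchain tip, if one is known.
    fn get_tip_burn_block_height(&self) -> Option<u32>;
    /// Sortition ID of the burnchain tip, if one is known.
    fn get_tip_sortition_id(&self) -> Option<SortitionId>;
}

struct MockedBurnDB;

impl BurnStateDB for MockedBurnDB {
    fn get_tip_burn_block_height(&self) -> Option<u32> {
        Some(0) // a genesis-only mock: the tip sits at burn height 0
    }
    fn get_tip_sortition_id(&self) -> Option<SortitionId> {
        Some(SortitionId([0u8; 32]))
    }
}

fn main() {
    let db = MockedBurnDB;
    assert_eq!(db.get_tip_burn_block_height(), Some(0));
    assert!(db.get_tip_sortition_id().is_some());
}
```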
u500000000 tx-sender (as-contract tx-sender)) "#; @@ -9372,6 +9398,27 @@ pub mod test { }; } + /// Call `process_transaction()` with prechecks + pub fn validate_transactions_static_epoch_and_process_transaction( + clarity_block: &mut ClarityTx, + tx: &StacksTransaction, + quiet: bool, + ast_rules: ASTRules, + ) -> Result<(u64, StacksTransactionReceipt), Error> { + let epoch = clarity_block.get_epoch(); + + if !StacksBlock::validate_transactions_static_epoch(&vec![tx.clone()], epoch) { + let msg = format!( + "Invalid transaction {}: target epoch is not activated", + tx.txid() + ); + warn!("{}", &msg); + return Err(Error::InvalidStacksTransaction(msg, false)); + } + + StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules) + } + #[test] fn test_checkerrors_at_runtime() { let privk = StacksPrivateKey::from_hex( @@ -9439,6 +9486,28 @@ pub mod test { let mut chainstate = instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); + let mut tx_runtime_checkerror_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &runtime_checkerror_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_trait_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_trait_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_trait_no_version.set_tx_fee(1); + tx_runtime_checkerror_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_runtime_checkerror_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_trait = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9481,6 +9550,28 @@ pub mod test { let signed_runtime_checkerror_impl_tx = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_impl_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &runtime_checkerror_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_impl_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_impl_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_impl_no_version.set_tx_fee(1); + tx_runtime_checkerror_impl_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_runtime_checkerror_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9502,6 +9593,28 @@ pub mod test { let signed_runtime_checkerror_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"trait-checkerror".to_string(), + &runtime_checkerror.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_clar1_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_clar1_no_version.set_tx_fee(1); + tx_runtime_checkerror_clar1_no_version.set_origin_nonce(2); + + let mut signer = 
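The `validate_transactions_static_epoch_and_process_transaction()` helper added above is a check-then-delegate wrapper: it runs the same static epoch validation a miner would apply before including a transaction in a block, and only then calls `process_transaction()`. That is why the versioned publishes below now fail with "target epoch is not activated" in pre-2.1 blocks. A stripped-down model of the shape, with hypothetical names throughout except the asserted message:

```rust
#[derive(Debug)]
enum Error {
    InvalidTransaction(String),
}

// Stand-in for StacksBlock::validate_transactions_static_epoch(): e.g.
// reject a Clarity-2 publish before epoch 2.1 has activated.
fn validate_static_epoch(tx: &str, epoch: u32) -> bool {
    !(tx.contains("clarity2") && epoch < 21)
}

fn process(tx: &str) -> Result<u64, Error> {
    let _ = tx;
    Ok(1) // fee
}

fn validate_then_process(tx: &str, epoch: u32) -> Result<u64, Error> {
    if !validate_static_epoch(tx, epoch) {
        return Err(Error::InvalidTransaction(
            "target epoch is not activated".into(),
        ));
    }
    process(tx)
}

fn main() {
    assert!(validate_then_process("clarity2 contract", 20).is_err());
    assert_eq!(validate_then_process("clarity1 contract", 20).unwrap(), 1);
}
```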
StacksTransactionSigner::new(&tx_runtime_checkerror_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9569,6 +9682,29 @@ pub mod test { let signed_runtime_checkerror_cc_contract_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_cc_contract_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"trait-checkerror-cc".to_string(), + &runtime_checkerror_contract.to_string(), + None, + ) + .unwrap(), + ); + + tx_runtime_checkerror_cc_contract_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_runtime_checkerror_cc_contract_clar1_no_version.chain_id = 0x80000000; + tx_runtime_checkerror_cc_contract_clar1_no_version.set_tx_fee(1); + tx_runtime_checkerror_cc_contract_clar1_no_version.set_origin_nonce(3); + + let mut signer = + StacksTransactionSigner::new(&tx_runtime_checkerror_cc_contract_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_runtime_checkerror_cc_contract_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_runtime_checkerror_cc_contract_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9605,34 +9741,34 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_trait_tx, + &signed_runtime_checkerror_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_impl_tx, + &signed_runtime_checkerror_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_tx_clar1, + &signed_runtime_checkerror_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9646,12 +9782,52 @@ pub mod test { } else { panic!("Did not get unchecked interpreter error"); } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_trait_tx, + false, + ASTRules::PrecheckSize, + ) 
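Each `_no_version` twin above passes `None` as the Clarity version in `TransactionPayload::new_smart_contract()`, so the contract deploys under whatever version the current epoch defaults to and stays valid in every epoch, while the explicitly versioned originals only become valid once 2.1 activates. A minimal model of that payload distinction (types and the default rule here are illustrative assumptions):

```rust
#[derive(Clone, Copy, Debug)]
enum ClarityVersion {
    Clarity1,
    Clarity2,
}

struct SmartContractPayload {
    name: String,
    code: String,
    /// `None` means "use the default version for the current epoch".
    version: Option<ClarityVersion>,
}

fn default_version_for_epoch(epoch: u32) -> ClarityVersion {
    if epoch >= 21 {
        ClarityVersion::Clarity2
    } else {
        ClarityVersion::Clarity1
    }
}

fn effective_version(p: &SmartContractPayload, epoch: u32) -> ClarityVersion {
    p.version.unwrap_or_else(|| default_version_for_epoch(epoch))
}

fn main() {
    let p = SmartContractPayload {
        name: "foo".into(),
        code: "(define-read-only (f) u1)".into(),
        version: None,
    };
    println!("{:?}", effective_version(&p, 20)); // Clarity1
    println!("{:?}", effective_version(&p, 30)); // Clarity2
}
```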
+ .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + let acct = StacksChainState::get_account(&mut conn, &addr.into()); assert_eq!(acct.nonce, 3); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_cc_contract_tx_clar1, + &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -9670,41 +9846,41 @@ pub mod test { // in 2.05, this invalidates the block let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_trait_tx, + &signed_runtime_checkerror_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_impl_tx, + &signed_runtime_checkerror_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_tx_clar1, + &signed_runtime_checkerror_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9718,12 +9894,51 @@ pub mod test { } else { panic!("Did not get unchecked interpreter error"); } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_runtime_checkerror_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } let acct = StacksChainState::get_account(&mut conn, &addr.into()); assert_eq!(acct.nonce, 3); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_runtime_checkerror_cc_contract_tx_clar1, + &signed_runtime_checkerror_cc_contract_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ 
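The `if let Error::InvalidStacksTransaction(..)` / `panic!` blocks recur dozens of times through these tests; if it is ever worth deduplicating them, a small helper along these lines (hypothetical, with a stand-in `Error`) would state the assertion once:

```rust
#[derive(Debug)]
enum Error {
    InvalidStacksTransaction(String, bool),
    Other,
}

fn assert_epoch_not_activated(err: Error) {
    match err {
        Error::InvalidStacksTransaction(msg, _ignored) => {
            assert!(
                msg.contains("target epoch is not activated"),
                "unexpected message: {msg}"
            );
        }
        other => panic!("expected InvalidStacksTransaction, got {other:?}"),
    }
}

fn main() {
    assert_epoch_not_activated(Error::InvalidStacksTransaction(
        "Invalid transaction deadbeef: target epoch is not activated".into(),
        false,
    ));
}
```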
-9756,7 +9971,7 @@ pub mod test { let signed_runtime_checkerror_cc_contract_tx_clar1 = signer.get_tx().unwrap(); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_trait_tx, false, @@ -9765,7 +9980,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_impl_tx, false, @@ -9783,7 +9998,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9807,7 +10022,7 @@ pub mod test { .find("TypeValueError(OptionalType(CallableType(Trait(TraitIdentifier ") .is_some()); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar1, false, @@ -9842,7 +10057,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_trait_tx, false, @@ -9851,7 +10066,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_impl_tx, false, @@ -9860,7 +10075,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_tx_clar2, false, @@ -9869,7 +10084,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_trait_checkerror_tx, false, @@ -9889,7 +10104,7 @@ pub mod test { assert!(tx_receipt.vm_error.is_none()); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_runtime_checkerror_cc_contract_tx_clar2, false, @@ -9976,6 +10191,27 @@ pub mod test { let signed_foo_trait_tx = signer.get_tx().unwrap(); + let mut tx_foo_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &foo_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_trait_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_trait_no_version.chain_id = 0x80000000; + tx_foo_trait_no_version.set_tx_fee(1); + tx_foo_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_foo_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_foo_impl = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -9997,6 +10233,27 @@ pub mod test { let signed_foo_impl_tx = signer.get_tx().unwrap(); + let mut tx_foo_impl_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + 
TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &foo_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_impl_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_impl_no_version.chain_id = 0x80000000; + tx_foo_impl_no_version.set_tx_fee(1); + tx_foo_impl_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_foo_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10018,6 +10275,27 @@ pub mod test { let signed_call_foo_tx_clar1 = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"call-foo".to_string(), + &call_foo.to_string(), + None, + ) + .unwrap(), + ); + + tx_call_foo_clar1_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_call_foo_clar1_no_version.chain_id = 0x80000000; + tx_call_foo_clar1_no_version.set_tx_fee(1); + tx_call_foo_clar1_no_version.set_origin_nonce(2); + + let mut signer = StacksTransactionSigner::new(&tx_call_foo_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_call_foo_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10078,27 +10356,27 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -10112,38 +10390,77 @@ pub mod test { _ => panic!("expected the contract publish to fail"), } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + 
conn.commit_block(); // in 2.05: analysis error should cause contract publish to fail let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) @@ -10157,7 +10474,7 @@ pub mod test { _ => panic!("expected the contract publish to fail"), } - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10172,6 +10489,45 @@ pub mod test { panic!("Did not get unchecked interpreter error"); } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.1, using clarity 1: analysis error should cause contract publish to fail @@ -10183,7 +10539,7 @@ pub mod test { &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10192,7 +10548,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10201,7 +10557,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, @@ -10228,7 +10584,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10237,7 +10593,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10246,7 +10602,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10255,7 +10611,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10348,6 +10704,27 @@ pub mod test { let signed_foo_trait_tx = signer.get_tx().unwrap(); + let mut tx_foo_trait_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo".to_string(), + &foo_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_trait_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_trait_no_version.chain_id = 0x80000000; + tx_foo_trait_no_version.set_tx_fee(1); + tx_foo_trait_no_version.set_origin_nonce(0); + + let mut signer = StacksTransactionSigner::new(&tx_foo_trait_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_trait_tx_no_version = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10369,6 +10746,28 @@ pub mod test { let signed_transitive_trait_clar1_tx = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"transitive".to_string(), + &transitive_trait.to_string(), + None, + ) + .unwrap(), + ); + + tx_transitive_trait_clar1_no_version.post_condition_mode = + TransactionPostConditionMode::Allow; + tx_transitive_trait_clar1_no_version.chain_id = 0x80000000; + tx_transitive_trait_clar1_no_version.set_tx_fee(1); + tx_transitive_trait_clar1_no_version.set_origin_nonce(1); + + let mut signer = StacksTransactionSigner::new(&tx_transitive_trait_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_transitive_trait_clar1_tx_no_version = signer.get_tx().unwrap(); + let mut tx_transitive_trait_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10411,6 +10810,27 @@ pub mod test { let signed_foo_impl_tx = signer.get_tx().unwrap(); + let mut tx_foo_impl_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"foo-impl".to_string(), + &foo_impl.to_string(), + None, + ) + .unwrap(), + ); + + tx_foo_impl_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_foo_impl_no_version.chain_id = 0x80000000; + tx_foo_impl_no_version.set_tx_fee(1); + tx_foo_impl_no_version.set_origin_nonce(2); + + let mut signer = StacksTransactionSigner::new(&tx_foo_impl_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_foo_impl_tx_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar1 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10432,6 +10852,27 @@ pub mod test { let signed_call_foo_tx_clar1 = signer.get_tx().unwrap(); + 
let mut tx_call_foo_clar1_no_version = StacksTransaction::new( + TransactionVersion::Testnet, + auth.clone(), + TransactionPayload::new_smart_contract( + &"call-foo".to_string(), + &call_foo.to_string(), + None, + ) + .unwrap(), + ); + + tx_call_foo_clar1_no_version.post_condition_mode = TransactionPostConditionMode::Allow; + tx_call_foo_clar1_no_version.chain_id = 0x80000000; + tx_call_foo_clar1_no_version.set_tx_fee(1); + tx_call_foo_clar1_no_version.set_origin_nonce(3); + + let mut signer = StacksTransactionSigner::new(&tx_call_foo_clar1_no_version); + signer.sign_origin(&privk).unwrap(); + + let signed_call_foo_tx_clar1_no_version = signer.get_tx().unwrap(); + let mut tx_call_foo_clar2 = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), @@ -10491,43 +10932,43 @@ pub mod test { &BlockHeaderHash([1u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_transitive_trait_clar1_tx, + &signed_transitive_trait_clar1_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10543,54 +10984,106 @@ pub mod test { } assert_eq!(fee, 1); + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_transitive_trait_clar1_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + 
assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.05: calling call-foo invalidates the block let mut conn = chainstate.block_begin( - &TestBurnStateDB_20, + &TestBurnStateDB_2_05, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([2u8; 20]), &BlockHeaderHash([2u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_trait_tx, + &signed_foo_trait_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_transitive_trait_clar1_tx, + &signed_transitive_trait_clar1_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_foo_impl_tx, + &signed_foo_impl_tx_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, - &signed_call_foo_tx_clar1, + &signed_call_foo_tx_clar1_no_version, false, ASTRules::PrecheckSize, ) .unwrap(); assert_eq!(fee, 1); - let err = StacksChainState::process_transaction( + let err = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10605,6 +11098,58 @@ pub mod test { panic!("Did not get unchecked interpreter error"); } + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_trait_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_transitive_trait_clar1_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_foo_impl_tx, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + + let err = validate_transactions_static_epoch_and_process_transaction( + &mut conn, + &signed_call_foo_tx_clar1, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + if let Error::InvalidStacksTransaction(msg, _ignored) = err { + assert!(msg.find("target epoch is not activated").is_some()); + } else { + panic!("Did not get epoch is not activated error"); + } + conn.commit_block(); // in 2.1, using clarity 1 for both `transitive` and `call-foo`: calling call-foo causes an analysis error @@ -10616,7 +11161,7 @@ pub mod test { &BlockHeaderHash([3u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10625,7 +11170,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar1_tx, false, @@ -10634,7 +11179,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10643,7 +11188,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, @@ -10652,7 +11197,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10683,7 +11228,7 @@ pub mod test { &BlockHeaderHash([4u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10692,7 +11237,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar1_tx, false, @@ -10701,7 +11246,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10710,7 +11255,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10719,7 +11264,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_test_call_foo_tx, false, @@ -10750,7 +11295,7 @@ pub mod test { &BlockHeaderHash([5u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10759,7 +11304,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar2_tx, false, @@ -10768,7 +11313,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10777,7 +11322,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar2, false, @@ -10804,7 +11349,7 @@ pub mod test { &BlockHeaderHash([6u8; 32]), ); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = 
validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_trait_tx, false, @@ -10813,7 +11358,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_transitive_trait_clar2_tx, false, @@ -10822,7 +11367,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, _) = StacksChainState::process_transaction( + let (fee, _) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_foo_impl_tx, false, @@ -10831,7 +11376,7 @@ pub mod test { .unwrap(); assert_eq!(fee, 1); - let (fee, tx_receipt) = StacksChainState::process_transaction( + let (fee, tx_receipt) = validate_transactions_static_epoch_and_process_transaction( &mut conn, &signed_call_foo_tx_clar1, false, diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 92d32dd0389..52afaceb661 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -232,7 +232,8 @@ impl UnconfirmedState { mblocks.len() ); - let headers_db = HeadersDBConn(chainstate.db()); + let stacks_dbconn = chainstate.index_conn(); + let headers_db = HeadersDBConn(stacks_dbconn); let burn_block_hash = headers_db .get_burn_header_hash_for_block(&self.confirmed_chain_tip) .expect("BUG: unable to get burn block hash based on chain tip"); @@ -240,7 +241,7 @@ impl UnconfirmedState { .get_burn_block_height_for_block(&self.confirmed_chain_tip) .expect("BUG: unable to get burn block height based on chain tip"); let burn_block_timestamp = headers_db - .get_burn_block_time_for_block(&self.confirmed_chain_tip) + .get_burn_block_time_for_block(&self.confirmed_chain_tip, None) .expect("BUG: unable to get burn block timestamp based on chain tip"); let ast_rules = burn_dbconn.get_ast_rules(burn_block_height); @@ -260,14 +261,13 @@ impl UnconfirmedState { if mblocks.len() > 0 { let cur_cost = self.cost_so_far.clone(); - let headers_db_conn = HeadersDBConn(chainstate.db()); // NOTE: we *must* commit the clarity_tx now that it's begun. // Otherwise, microblock miners can leave the MARF in a partially-initialized state, // leading to a node crash. 
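In the `unconfirmed.rs` tests above, reads move off the raw `sortdb.index_conn()` onto handles pinned to a specific view: `index_handle_at_tip()` for the canonical tip, and the fallible `index_handle_at_block()` for an explicit block. A sketch of that API shape with stand-in types:

```rust
struct SortitionDB;

struct IndexHandle {
    at: String, // which view this handle is pinned to
}

impl SortitionDB {
    fn index_handle_at_tip(&self) -> IndexHandle {
        IndexHandle { at: "tip".into() }
    }

    // The real method can fail, e.g. if the block is unknown, hence the
    // `.unwrap()` at the call sites above.
    fn index_handle_at_block(&self, block: &str) -> Result<IndexHandle, String> {
        if block.is_empty() {
            Err("unknown block".into())
        } else {
            Ok(IndexHandle { at: block.into() })
        }
    }
}

fn main() {
    let sortdb = SortitionDB;
    let tip_handle = sortdb.index_handle_at_tip();
    let block_handle = sortdb.index_handle_at_block("deadbeef").unwrap();
    println!("{} / {}", tip_handle.at, block_handle.at);
}
```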
let mut clarity_tx = StacksChainState::chainstate_begin_unconfirmed( db_config, - &headers_db_conn, + &headers_db, &mut self.clarity_inst, burn_dbconn, &self.confirmed_chain_tip, @@ -754,7 +754,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -777,8 +777,9 @@ mod test { // build 1-block microblock stream let microblocks = { let sortdb = peer.sortdb.take().unwrap(); - let sort_iconn = sortdb.index_conn(); - + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); @@ -851,13 +852,16 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) @@ -874,9 +878,12 @@ mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { + .with_read_only_clarity_tx(&iconn, &canonical_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) @@ -984,7 +991,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -1007,9 +1014,11 @@ mod test { // build microblock stream iteratively, and test balances at each additional microblock let sortdb = peer.sortdb.take().unwrap(); let microblocks = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); let mut microblock_builder = StacksMicroblockBuilder::new( @@ -1083,18 +1092,21 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_unconfirmed_clarity_tx( + &sortdb.index_handle_at_tip(), + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap() .unwrap(); peer.sortdb = 
Some(sortdb); @@ -1110,13 +1122,17 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &canonical_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap(); peer.sortdb = Some(sortdb); @@ -1270,7 +1286,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, anchored_tx], ) .unwrap(); @@ -1297,7 +1313,7 @@ mod test { Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb); let microblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut microblock_builder = StacksMicroblockBuilder::resume_unconfirmed( &mut inner_node.chainstate, &sort_iconn, @@ -1385,13 +1401,16 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let db_recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index a7116034470..7f92efdd8b7 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -27,11 +27,12 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use crate::chainstate::stacks::index::bits::{ get_node_byte_len, get_node_hash, read_block_identifier, read_hash_bytes, read_node_hash_bytes, diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 1477f9a7ddb..4123b1310aa 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -28,11 +28,12 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use crate::chainstate::stacks::index::bits::{ get_node_byte_len, get_node_hash, read_block_identifier, read_hash_bytes, read_node_hash_bytes, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs 
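The import churn in `cache.rs`, `file.rs`, `storage.rs`, and `trie_sql.rs` follows rusqlite's removal of its `NO_PARAMS` constant in newer releases; the code now pulls an equivalent from `stacks_common::types::sqlite`. Presumably that constant is just an empty parameter slice, along these lines:

```rust
use rusqlite::{Connection, ToSql};

// A stand-in for stacks_common::types::sqlite::NO_PARAMS: an empty slice
// of parameter trait objects, usable anywhere rusqlite expects params.
pub const NO_PARAMS: &[&dyn ToSql] = &[];

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    let one: i64 = conn.query_row("SELECT 1", NO_PARAMS, |row| row.get(0))?;
    assert_eq!(one, 1);
    Ok(())
}
```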
b/stackslib/src/chainstate/stacks/index/marf.rs index 630454eabb5..d5dd77c51f8 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1488,7 +1488,7 @@ impl<T: MarfTrieId> MARF<T> { self.open_chain_tip.as_ref().map(|x| &x.block_hash) } - /// Get open chain tip + /// Get open chain tip block height pub fn get_open_chain_tip_height(&self) -> Option<u64> { self.open_chain_tip.as_ref().map(|x| x.height) } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 97f7ca999ae..6994c7ad053 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -27,12 +27,13 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use sha2::Digest; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::to_hex; use stacks_common::util::log; @@ -1656,7 +1657,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // save the currently-buffered Trie to disk, and atomically put it into place (possibly to // a different block than the one opened, as indicated by final_bhh). // Runs once -- subsequent calls are no-ops. - // Panics on a failure to rename the Trie file into place (i.e. if the the actual commitment + // Panics on a failure to rename the Trie file into place (i.e. if the actual commitment // fails). self.clear_cached_ancestor_hashes_bytes(); if self.data.readonly { diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index be1ae91c21c..c9d3b40dcef 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -28,10 +28,13 @@ use std::{error, fmt, fs, io, os}; use regex::Regex; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, ToSql}; -use rusqlite::{Connection, Error as SqliteError, OptionalExtension, Transaction, NO_PARAMS}; +use rusqlite::{ params, Connection, DatabaseName, Error as SqliteError, OptionalExtension, Transaction, }; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::log; use crate::chainstate::stacks::index::bits::{ @@ -220,7 +223,7 @@ pub fn get_block_hash<T: MarfTrieId>(conn: &Connection, local_id: u32) -> Result<T, Error> let result = conn .query_row( "SELECT block_hash FROM marf_data WHERE block_id = ?", - &[local_id], + params![local_id], |row| row.get("block_hash"), ) .optional()?; @@ -236,7 +239,7 @@ pub fn write_trie_blob<T: MarfTrieId>( block_hash: &T, data: &[u8], ) -> Result<u32, Error> { - let args: &[&dyn ToSql] = &[block_hash, &data, &0, &0, &0]; + let args = params![block_hash, data, 0, 0, 0,]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; let block_id = s @@ -263,13 +266,13 @@ fn inner_write_external_trie_blob<T: MarfTrieId>( let block_id = if let Some(block_id) = block_id { // existing entry (i.e. 
a migration) let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = &[ + let args = params![ block_hash, - &empty_blob, - &0, - &u64_to_sql(offset)?, - &u64_to_sql(length)?, - &block_id, + empty_blob, + 0, + u64_to_sql(offset)?, + u64_to_sql(length)?, + block_id, ]; let mut s = conn.prepare("UPDATE marf_data SET block_hash = ?1, data = ?2, unconfirmed = ?3, external_offset = ?4, external_length = ?5 WHERE block_id = ?6")?; @@ -283,12 +286,12 @@ } else { // new entry let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = &[ + let args = params![ block_hash, - &empty_blob, - &0, - &u64_to_sql(offset)?, - &u64_to_sql(length)?, + empty_blob, + 0, + u64_to_sql(offset)?, + u64_to_sql(length)?, ]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; @@ -339,13 +342,13 @@ pub fn write_trie_blob_to_mined<T: MarfTrieId>( ) -> Result<u32, Error> { if let Ok(block_id) = get_mined_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = &[&data, &block_id]; + let args = params![data, block_id]; let mut s = conn.prepare("UPDATE mined_blocks SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = &[block_hash, &data]; + let args = params![block_hash, data]; let mut s = conn.prepare("INSERT INTO mined_blocks (block_hash, data) VALUES (?, ?)")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); @@ -372,13 +375,13 @@ pub fn write_trie_blob_to_unconfirmed<T: MarfTrieId>( if let Ok(Some(block_id)) = get_unconfirmed_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = &[&data, &block_id]; + let args = params![data, block_id]; let mut s = conn.prepare("UPDATE marf_data SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = &[block_hash, &data, &1]; + let args = params![block_hash, data, 1]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, 0, 0)")?; s.execute(args) @@ -398,7 +401,7 @@ pub fn write_trie_blob_to_unconfirmed<T: MarfTrieId>( /// Open a trie blob. Returns a Blob<'a> readable/writeable handle to it. pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result<Blob<'a>, Error> { let blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -410,7 +413,7 @@ pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) /// Open a trie blob. Returns a Blob<'a> readable handle to it. pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Result<Blob<'a>, Error> { let blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -429,7 +432,7 @@ pub fn read_all_block_hashes_and_roots<T: MarfTrieId>( let rows = s.query_and_then(NO_PARAMS, |row| { let block_hash: T = row.get_unwrap("block_hash"); let data = row - .get_raw("data") + .get_ref("data")? 
.as_blob() .expect("DB Corruption: MARF data is non-blob"); let start = TrieStorageConnection::<T>::root_ptr_disk() as usize; @@ -447,7 +450,7 @@ pub fn read_node_hash_bytes<W: Write>( ptr: &TriePtr, ) -> Result<(), Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -469,13 +472,7 @@ pub fn read_node_hash_bytes_by_bhh<W: Write, T: MarfTrieId>( &[bhh], |r| r.get("block_id"), )?; - let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, - "marf_data", - "data", - row_id, - true, - )?; + let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?; let hash_buff = bits_read_node_hash_bytes(&mut blob, ptr)?; w.write_all(&hash_buff).map_err(|e| e.into()) } @@ -487,7 +484,7 @@ pub fn read_node_type( ptr: &TriePtr, ) -> Result<(TrieNodeType, TrieHash), Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -503,7 +500,7 @@ pub fn read_node_type_nohash( ptr: &TriePtr, ) -> Result<TrieNodeType, Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -518,7 +515,7 @@ pub fn get_external_trie_offset_length( block_id: u32, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_id = ?1"; - let args: &[&dyn ToSql] = &[&block_id]; + let args = params![block_id]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } @@ -529,7 +526,7 @@ pub fn get_external_trie_offset_length_by_bhh<T: MarfTrieId>( bhh: &T, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = &[bhh]; + let args = params![bhh]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } @@ -581,7 +578,7 @@ pub fn get_node_hash_bytes( ptr: &TriePtr, ) -> Result<TrieHash, Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -601,13 +598,7 @@ pub fn get_node_hash_bytes_by_bhh<T: MarfTrieId>( &[bhh], |r| r.get("block_id"), )?; - let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, - "marf_data", - "data", - row_id, - true, - )?; + let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?; let hash_buff = bits_read_node_hash_bytes(&mut blob, ptr)?; Ok(TrieHash(hash_buff)) } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index ec8ac4a36cc..f2dfdf5dffe 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -39,7 +39,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use stacks_common::util::vrf::*; use crate::burnchains::{Burnchain, PrivateKey, PublicKey}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx, }; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; @@ -131,6 +133,7 @@ pub fn signal_mining_blocked(miner_status: Arc<Mutex<MinerStatus>>) { /// resume mining if we blocked it earlier pub fn signal_mining_ready(miner_status: Arc<Mutex<MinerStatus>>) { + debug!("Signaling miner to resume"; "thread_id" => ?std::thread::current().id()); match miner_status.lock() { Ok(mut status) => { 
status.remove_blocked(); @@ -170,6 +173,8 @@ pub struct BlockBuilderSettings { } impl BlockBuilderSettings { + // TODO: add tests from mutation testing results #4873 + #[cfg_attr(test, mutants::skip)] pub fn limited() -> BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::MAX, @@ -179,6 +184,8 @@ impl BlockBuilderSettings { } } + // TODO: add tests from mutation testing results #4873 + #[cfg_attr(test, mutants::skip)] pub fn max_value() -> BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::MAX, @@ -337,7 +344,7 @@ pub enum TransactionResult { Success(TransactionSuccess), /// Transaction failed when processed. ProcessingError(TransactionError), - /// Transaction wasn't ready to be be processed, but might succeed later. + /// Transaction wasn't ready to be processed, but might succeed later. Skipped(TransactionSkipped), /// Transaction is problematic (e.g. a DDoS vector) and should be dropped. /// This error variant is a placeholder for fixing Clarity VM quirks in the next network @@ -353,7 +360,7 @@ pub enum TransactionEvent { Success(TransactionSuccessEvent), /// Transaction failed. It may succeed later depending on the error. ProcessingError(TransactionErrorEvent), - /// Transaction wasn't ready to be be processed, but might succeed later. + /// Transaction wasn't ready to be processed, but might succeed later. /// The bool represents whether mempool propagation should halt or continue Skipped(TransactionSkippedEvent), /// Transaction is problematic and will be dropped @@ -725,11 +732,11 @@ impl<'a> StacksMicroblockBuilder<'a> { anchor_block, anchor_block_consensus_hash, anchor_block_height, - runtime: runtime, + runtime, clarity_tx: Some(clarity_tx), header_reader, unconfirmed: false, - settings: settings, + settings, ast_rules, }) } @@ -803,11 +810,11 @@ impl<'a> StacksMicroblockBuilder<'a> { anchor_block: anchored_block_hash, anchor_block_consensus_hash: anchored_consensus_hash, anchor_block_height: anchored_block_height, - runtime: runtime, + runtime, clarity_tx: Some(clarity_tx), header_reader, unconfirmed: true, - settings: settings, + settings, ast_rules, }) } @@ -1197,7 +1204,6 @@ impl<'a> StacksMicroblockBuilder<'a> { intermediate_result = mem_pool.iterate_candidates( &mut clarity_tx, &mut tx_events, - self.anchor_block_height, mempool_settings.clone(), |clarity_tx, to_consider, estimator| { let mempool_tx = &to_consider.tx; @@ -1279,14 +1285,14 @@ impl<'a> StacksMicroblockBuilder<'a> { // Make the block from the transactions we did manage to get debug!("Block budget exceeded on tx {}", &mempool_tx.tx.txid()); if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - debug!("Block budget exceeded while mining microblock"; + debug!("Block budget exceeded while mining microblock"; "tx" => %mempool_tx.tx.txid(), "next_behavior" => "Switch to mining stx-transfers only"); block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT { - debug!("Block budget exceeded while mining microblock"; + debug!("Block budget exceeded while mining microblock"; "tx" => %mempool_tx.tx.txid(), "next_behavior" => "Stop mining microblock"); block_limit_hit = BlockLimitFunction::LIMIT_REACHED; return Ok(None); @@ -1493,6 +1499,7 @@ impl StacksBlockBuilder { burn_header_timestamp: genesis_burn_header_timestamp, burn_header_height: genesis_burn_header_height, anchored_block_size: 0, + burn_view: None, }; let mut builder = StacksBlockBuilder::from_parent_pubkey_hash( @@ -1793,6 +1800,8 @@ impl 
StacksBlockBuilder { } } + // TODO: add tests from mutation testing results #4859 + #[cfg_attr(test, mutants::skip)] /// This function should be called before `epoch_begin`. /// It loads the parent microblock stream, sets the parent microblock, and returns /// data necessary for `epoch_begin`. @@ -1803,7 +1812,7 @@ impl StacksBlockBuilder { pub fn pre_epoch_begin<'a>( &mut self, chainstate: &'a mut StacksChainState, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, confirm_microblocks: bool, ) -> Result<MinerEpochInfo<'a>, Error> { debug!( @@ -1912,7 +1921,7 @@ impl StacksBlockBuilder { /// returned ClarityTx object. pub fn epoch_begin<'a, 'b>( &mut self, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerEpochInfo<'a>, ) -> Result<(ClarityTx<'b, 'b>, ExecutionCost), Error> { let SetupBlockResult { @@ -1974,7 +1983,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, txs: Vec<StacksTransaction>, ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { Self::make_anchored_block_and_microblock_from_txs( @@ -1993,7 +2002,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec<StacksTransaction>, mut mblock_txs: Vec<StacksTransaction>, ) -> Result<(StacksBlock, u64, ExecutionCost, Option<StacksMicroblock>), Error> { @@ -2047,6 +2056,8 @@ impl StacksBlockBuilder { Ok((block, size, cost, mblock_opt)) } + // TODO: add tests from mutation testing results #4860 + #[cfg_attr(test, mutants::skip)] /// Create a block builder for mining pub fn make_block_builder( burnchain: &Burnchain, @@ -2101,6 +2112,8 @@ impl StacksBlockBuilder { Ok(builder) } + // TODO: add tests from mutation testing results #4860 + #[cfg_attr(test, mutants::skip)] /// Create a block builder for regtest mining pub fn make_regtest_block_builder( burnchain: &Burnchain, @@ -2197,7 +2210,6 @@ impl StacksBlockBuilder { intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, - tip_height, mempool_settings.clone(), |epoch_tx, to_consider, estimator| { // first, have we been preempted? @@ -2332,7 +2344,7 @@ impl StacksBlockBuilder { // if we have an invalid transaction that was quietly ignored, don't warn here either } e => { - warn!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); + info!("Failed to apply tx {}: {:?}", &txinfo.tx.txid(), &e); return Ok(Some(result_event)); } } @@ -2381,11 +2393,14 @@ impl StacksBlockBuilder { Ok((blocked, tx_events)) } + // TODO: add tests from mutation testing results #4861 + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Given access to the mempool, mine an anchored block with no more than the given execution cost. /// returns the assembled block, and the consumed execution budget. pub fn build_anchored_block( chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mempool: &mut MemPoolDB, parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of total_burn: u64, // the burn so far on the burnchain (i.e. 
from the last burnchain block) @@ -2499,7 +2514,7 @@ impl StacksBlockBuilder { info!( "Miner: mined anchored block"; - "block_hash" => %block.block_hash(), + "stacks_block_hash" => %block.block_hash(), "height" => block.header.total_work.work, "tx_count" => block.txs.len(), "parent_stacks_block_hash" => %block.header.parent_block, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f9ad4fff3fb..35c82f9b94e 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -27,8 +27,8 @@ use clarity::vm::types::{ PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, Value, }; use clarity::vm::ClarityVersion; -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}; -use rusqlite::{Error as RusqliteError, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; +use rusqlite::Error as RusqliteError; use serde::{Deserialize, Serialize}; use serde_json::json; use sha2::{Digest, Sha512_256}; @@ -99,6 +99,8 @@ pub enum Error { StacksTransactionSkipped(String), PostConditionFailed(String), NoSuchBlockError, + /// The supplied Sortition IDs, consensus hashes, or stacks blocks are not in the same fork. + NotInSameFork, InvalidChainstateDB, BlockTooBigError, TransactionTooBigError, @@ -224,6 +226,9 @@ impl fmt::Display for Error { Error::NoRegisteredSigners(reward_cycle) => { write!(f, "No registered signers for reward cycle {reward_cycle}") } + Error::NotInSameFork => { + write!(f, "The supplied block identifiers are not in the same fork") + } } } } @@ -268,6 +273,7 @@ impl error::Error for Error { Error::InvalidChildOfNakomotoBlock => None, Error::ExpectedTenureChange => None, Error::NoRegisteredSigners(_) => None, + Error::NotInSameFork => None, } } } @@ -312,6 +318,7 @@ impl Error { Error::InvalidChildOfNakomotoBlock => "InvalidChildOfNakomotoBlock", Error::ExpectedTenureChange => "ExpectedTenureChange", Error::NoRegisteredSigners(_) => "NoRegisteredSigners", + Error::NotInSameFork => "NotInSameFork", } } @@ -506,6 +513,13 @@ pub enum MultisigHashMode { P2WSH = 0x03, } +#[repr(u8)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum OrderIndependentMultisigHashMode { + P2SH = 0x05, + P2WSH = 0x07, +} + impl SinglesigHashMode { pub fn to_address_hash_mode(&self) -> AddressHashMode { match *self { @@ -556,6 +570,35 @@ impl MultisigHashMode { } } +impl OrderIndependentMultisigHashMode { + pub fn to_address_hash_mode(&self) -> AddressHashMode { + match *self { + OrderIndependentMultisigHashMode::P2SH => AddressHashMode::SerializeP2SH, + OrderIndependentMultisigHashMode::P2WSH => AddressHashMode::SerializeP2WSH, + } + } + + pub fn from_address_hash_mode(hm: AddressHashMode) -> Option<OrderIndependentMultisigHashMode> { + match hm { + AddressHashMode::SerializeP2SH => Some(OrderIndependentMultisigHashMode::P2SH), + AddressHashMode::SerializeP2WSH => Some(OrderIndependentMultisigHashMode::P2WSH), + _ => None, + } + } + + pub fn from_u8(n: u8) -> Option<OrderIndependentMultisigHashMode> { + match n { + x if x == OrderIndependentMultisigHashMode::P2SH as u8 => { + Some(OrderIndependentMultisigHashMode::P2SH) + } + x if x == OrderIndependentMultisigHashMode::P2WSH as u8 => { + Some(OrderIndependentMultisigHashMode::P2WSH) + } + _ => None, + } + } +} + /// A structure that encodes enough state to authenticate /// a transaction's execution against a Stacks address. /// public_keys + signatures_required determines the Principal. 
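The two conversions on the new OrderIndependentMultisigHashMode above are intended to be exact inverses of the repr(u8) discriminants and of to_address_hash_mode. A throwaway property test sketching that contract -- illustrative only, not part of the patch, and assuming the stackslib types above are in scope via use super::*:

#[cfg(test)]
mod order_independent_hash_mode_sketch {
    use super::*;

    #[test]
    fn conversions_round_trip() {
        for mode in [
            OrderIndependentMultisigHashMode::P2SH,
            OrderIndependentMultisigHashMode::P2WSH,
        ] {
            // from_u8 inverts the repr(u8) discriminant (0x05 / 0x07).
            assert_eq!(
                OrderIndependentMultisigHashMode::from_u8(mode.clone() as u8),
                Some(mode.clone())
            );
            // from_address_hash_mode inverts to_address_hash_mode.
            assert_eq!(
                OrderIndependentMultisigHashMode::from_address_hash_mode(
                    mode.to_address_hash_mode()
                ),
                Some(mode)
            );
        }
        // Unknown bytes and non-multisig address modes are rejected.
        assert_eq!(OrderIndependentMultisigHashMode::from_u8(0x01), None);
        assert_eq!(
            OrderIndependentMultisigHashMode::from_address_hash_mode(
                AddressHashMode::SerializeP2PKH
            ),
            None
        );
    }
}

Note that any byte other than 0x05/0x07 maps to None, which keeps the order-independent modes disjoint from the legacy MultisigHashMode discriminants (0x01/0x03) in the wire format.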
@@ -580,10 +623,21 @@ pub struct SinglesigSpendingCondition { pub signature: MessageSignature, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct OrderIndependentMultisigSpendingCondition { + pub hash_mode: OrderIndependentMultisigHashMode, + pub signer: Hash160, + pub nonce: u64, // nth authorization from this account + pub tx_fee: u64, // microSTX/compute rate offered by this account + pub fields: Vec<TransactionAuthField>, + pub signatures_required: u16, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum TransactionSpendingCondition { Singlesig(SinglesigSpendingCondition), Multisig(MultisigSpendingCondition), + OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition), } /// Types of transaction authorizations @@ -1079,12 +1133,18 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { + use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; + use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; use stacks_common::util::log; + use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; + use crate::chainstate::burn::BlockSnapshot; + use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; + use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use crate::chainstate::stacks::{StacksPublicKey as PubKey, *}; use crate::core::*; use crate::net::codec::test::check_codec_and_corruption; @@ -1097,6 +1157,7 @@ chain_id: u32, anchor_mode: &TransactionAnchorMode, post_condition_mode: &TransactionPostConditionMode, + epoch_id: StacksEpochId, ) -> Vec<StacksTransaction> { let addr = StacksAddress { version: 1, @@ -1130,7 +1191,7 @@ signature: MessageSignature([3u8; 65]), }; - let spending_conditions = vec![ + let mut spending_conditions = vec![ TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { signer: Hash160([0x11; 20]), hash_mode: SinglesigHashMode::P2PKH, @@ -1190,9 +1251,50 @@ TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) ], signatures_required: 2 - }) + }), ]; + if epoch_id >= StacksEpochId::Epoch30 { + spending_conditions.append(&mut vec![ + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2WSH, + nonce: 678, + tx_fee: 901, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 345, + tx_fee: 678, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Uncompressed, MessageSignature::from_raw(&vec![0xfe; 65])), 
TransactionAuthField::PublicKey(PubKey::from_hex("04ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c771f112f919b00a6c6c5f51f7c63e1762fe9fac9b66ec75a053db7f51f4a52712b").unwrap()), + ], + signatures_required: 2 + }), + TransactionSpendingCondition::OrderIndependentMultisig(OrderIndependentMultisigSpendingCondition { + signer: Hash160([0x11; 20]), + hash_mode: OrderIndependentMultisigHashMode::P2SH, + nonce: 456, + tx_fee: 789, + fields: vec![ + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xff; 65])), + TransactionAuthField::Signature(TransactionPublicKeyEncoding::Compressed, MessageSignature::from_raw(&vec![0xfe; 65])), + TransactionAuthField::PublicKey(PubKey::from_hex("03ef2340518b5867b23598a9cf74611f8b98064f7d55cdb8c107c67b5efcbc5c77").unwrap()) + ], + signatures_required: 2 + }), + ]) + } + let mut tx_auths = vec![]; for i in 0..spending_conditions.len() { let spending_condition = &spending_conditions[i]; @@ -1340,7 +1442,7 @@ pub mod test { }; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); - let tx_payloads = vec![ + let mut tx_payloads = vec![ TransactionPayload::TokenTransfer( stx_address.into(), 123, @@ -1384,48 +1486,60 @@ pub mod test { }, Some(ClarityVersion::Clarity2), ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Contract( - QualifiedContractIdentifier::transient(), - )), - None, - ), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Standard(StandardPrincipalData( - 0x01, [0x02; 20], - ))), - None, - ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Contract( - QualifiedContractIdentifier::transient(), - )), - Some(proof.clone()), - ), - TransactionPayload::Coinbase( - CoinbasePayload([0x12; 32]), - Some(PrincipalData::Standard(StandardPrincipalData( - 0x01, [0x02; 20], - ))), - Some(proof.clone()), - ), TransactionPayload::PoisonMicroblock(mblock_header_1, mblock_header_2), - TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: ConsensusHash([0x01; 20]), - prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), - burn_view_consensus_hash: ConsensusHash([0x03; 20]), - previous_tenure_end: StacksBlockId([0x00; 32]), - previous_tenure_blocks: 0, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x00; 20]), - }), ]; + if epoch_id >= StacksEpochId::Epoch30 { + tx_payloads.append(&mut vec![ + TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x01; 20]), + prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), + burn_view_consensus_hash: ConsensusHash([0x03; 20]), + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 0, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x00; 20]), + }), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + None, + Some(proof.clone()), + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + QualifiedContractIdentifier::transient(), + )), + Some(proof.clone()), + ), + TransactionPayload::Coinbase( + 
CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + Some(proof.clone()), + ), + ]) + } else { + tx_payloads.append(&mut vec![ + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + QualifiedContractIdentifier::transient(), + )), + None, + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + None, + ), + ]) + } + // create all kinds of transactions let mut all_txs = vec![]; for tx_auth in tx_auths.iter() { @@ -1464,7 +1578,7 @@ pub mod test { all_txs } - pub fn make_codec_test_block(num_txs: usize) -> StacksBlock { + pub fn make_codec_test_block(num_txs: usize, epoch_id: StacksEpochId) -> StacksBlock { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -1483,6 +1597,11 @@ pub mod test { origin_auth.clone(), TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); + let tx_coinbase_proof = StacksTransaction::new( + TransactionVersion::Mainnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof.clone())), + ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1491,11 +1610,17 @@ pub mod test { 0x80000000, &TransactionAnchorMode::OnChainOnly, &TransactionPostConditionMode::Allow, + epoch_id, ); // remove all coinbases, except for an initial coinbase let mut txs_anchored = vec![]; - txs_anchored.push(tx_coinbase); + + if epoch_id >= StacksEpochId::Epoch30 { + txs_anchored.push(tx_coinbase_proof); + } else { + txs_anchored.push(tx_coinbase); + } for tx in all_txs.drain(..) 
{ match tx.payload { @@ -1545,6 +1670,67 @@ } } + pub fn make_codec_test_nakamoto_block( + epoch_id: StacksEpochId, + miner_privk: &StacksPrivateKey, + ) -> NakamotoBlock { + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + + let stx_address = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let payload = TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([0u8; 34]), + ); + + let auth = TransactionAuth::from_p2pkh(miner_privk).unwrap(); + let addr = auth.origin().address_testnet(); + let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + tx.chain_id = 0x80000000; + tx.auth.set_origin_nonce(34); + tx.set_post_condition_mode(TransactionPostConditionMode::Allow); + tx.set_tx_fee(300); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(miner_privk).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let txid_vecs = vec![tx.txid().as_bytes().to_vec()]; + let txs_anchored = vec![tx]; + let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + + let header = NakamotoBlockHeader { + version: 0x00, + chain_length: 107, + burn_spent: 25000, + consensus_hash: MINER_BLOCK_CONSENSUS_HASH.clone(), + parent_block_id: StacksBlockId::from_bytes(&[0x11; 32]).unwrap(), + tx_merkle_root, + state_index_root: TrieHash::from_hex( + "fb419c3d8f40ae154018f2abf3935e2275a14c091e071bacaf6cbf5579743a0f", + ) + .unwrap(), + timestamp: get_epoch_time_secs(), + miner_signature: MessageSignature::empty(), + signer_signature: Vec::new(), + pox_treatment: BitVec::ones(8).unwrap(), + }; + + NakamotoBlock { + header, + txs: txs_anchored, + } + } + pub fn make_codec_test_microblock(num_txs: usize) -> StacksMicroblock { let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", @@ -1561,6 +1747,7 @@ 0x80000000, &TransactionAnchorMode::OffChainOnly, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let txs_mblock: Vec<_> = all_txs.into_iter().take(num_txs).collect(); diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 8d65e40a4ed..90338033256 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -228,7 +228,7 @@ fn test_bad_microblock_fees_pre_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -357,8 +357,8 @@ fn test_bad_microblock_fees_pre_v210() { let matured_reward_opt = StacksChainState::get_matured_miner_payment( peer.chainstate().db(), - &parent_block_id, - &block_id, + &parent_block_id.into(), + &block_id.into(), ) .unwrap(); @@ -551,7 +551,7 @@ fn test_bad_microblock_fees_fix_transition() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -680,8 +680,8 @@ fn 
test_bad_microblock_fees_fix_transition() { let matured_reward_opt = StacksChainState::get_matured_miner_payment( peer.chainstate().db(), - &parent_block_id, - &block_id, + &parent_block_id.into(), + &block_id.into(), ) .unwrap(); @@ -907,7 +907,7 @@ fn test_get_block_info_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1029,7 +1029,7 @@ fn test_get_block_info_v210() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1296,7 +1296,7 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, @@ -1333,7 +1333,7 @@ fn test_get_block_info_v210_no_microblocks() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1678,7 +1678,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1803,7 +1803,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1911,29 +1911,33 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { // reported correctly. let recipient_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - let recipient_balance_val = clarity_tx - .with_readonly_clarity_env( - false, - CHAIN_ID_TESTNET, - ClarityVersion::Clarity2, - PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), - None, - LimitedCostTracker::new_free(), - |env| { - if pay_to_contract { - env.eval_raw(&format!( - "(stx-get-balance '{}.{})", - &addr_anchored, contract_name - )) - } else { - env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) - } - }, - ) - .unwrap(); - recipient_balance_val.expect_u128().unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| { + let recipient_balance_val = clarity_tx + .with_readonly_clarity_env( + false, + CHAIN_ID_TESTNET, + ClarityVersion::Clarity2, + PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), + None, + LimitedCostTracker::new_free(), + |env| { + if pay_to_contract { + env.eval_raw(&format!( + "(stx-get-balance '{}.{})", + &addr_anchored, contract_name + )) + } else { + env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) + } + }, + ) + .unwrap(); + recipient_balance_val.expect_u128().unwrap() + }, + ) .unwrap(); // N.B. `stx-get-balance` will reflect one more block-reward than `get-block-info? 
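The recurring edit in these test files mirrors the miner.rs change above: call sites stop borrowing the raw sortition index (sortdb.index_conn()) and instead take a SortitionHandleConn pinned to a tip. A minimal sketch of the migrated pattern, distilled from the surrounding hunks -- the TestPeer harness and stackslib imports are assumed, and error handling keeps the tests' unwrap() style:

fn refresh_unconfirmed_at(peer: &mut TestPeer, canonical_tip: &StacksBlockId) {
    // Take the sortition DB out of the peer, as these tests do.
    let sortdb = peer.sortdb.take().unwrap();
    // Pre-patch: let iconn = sortdb.index_conn();
    // Post-patch: pin the view to an explicit block. This can fail when the
    // block is not an ancestor on the handle's fork -- the situation the new
    // Error::NotInSameFork variant describes.
    let iconn = sortdb
        .index_handle_at_block(&peer.chainstate(), canonical_tip)
        .unwrap();
    peer.chainstate()
        .reload_unconfirmed_state(&iconn, canonical_tip.clone())
        .unwrap();
    peer.sortdb = Some(sortdb);
}

Tests that only ever need the canonical view use the shorter sortdb.index_handle_at_tip() instead, as in the accounting hunks above.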
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ae428af15fc..41942078403 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -32,6 +32,7 @@ use clarity::vm::test_util::TEST_BURN_STATE_DB; use clarity::vm::types::*; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; +use rusqlite::params; use stacks_common::address::*; use stacks_common::types::chainstate::SortitionId; use stacks_common::util::hash::MerkleTree; @@ -130,7 +131,7 @@ fn test_build_anchored_blocks_empty() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -254,7 +255,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -391,7 +392,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -557,7 +558,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -712,7 +713,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -968,7 +969,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1250,7 +1251,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1498,7 +1499,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1652,7 +1653,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), mempool_to_use, &parent_tip, tip.total_burn, @@ -1759,7 +1760,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1966,7 +1967,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut 
mempool, &parent_tip, tip.total_burn, @@ -2133,7 +2134,7 @@ fn test_build_anchored_blocks_invalid() { let coinbase_tx = make_coinbase(miner, tenure_id as usize); let mut anchored_block = StacksBlockBuilder::build_anchored_block( - chainstate, &sortdb.index_conn(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, + chainstate, &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, ).unwrap(); if tenure_id == bad_block_tenure { @@ -2403,7 +2404,7 @@ fn test_build_anchored_blocks_bad_nonces() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2531,7 +2532,7 @@ fn test_build_microblock_stream_forks() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -2654,7 +2655,7 @@ fn test_build_microblock_stream_forks() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2858,7 +2859,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -3059,7 +3060,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // erase any pending transactions -- this is a "worse" poison-microblock, // and we want to avoid mining the "better" one - mempool.clear_before_height(10).unwrap(); + mempool.clear_before_coinbase_height(10).unwrap(); let mut tx_bytes = vec![]; poison_microblock_tx @@ -3081,7 +3082,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip.anchored_header.as_stacks_epoch2().unwrap().total_work.burn + 1000, @@ -3186,15 +3187,19 @@ fn test_build_microblock_stream_forks_with_descendants() { test_debug!("Check {} in {} for report", &reporter_addr, &chain_tip); peer.with_db_state(|ref mut sortdb, ref mut chainstate, _, _| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - // the key at height 1 should be reported as poisoned - let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) - .unwrap() - .unwrap(); - assert_eq!(report.0, reporter_addr); - assert_eq!(report.1, seq); - Ok(()) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &chain_tip, + |clarity_tx| { + // the key at height 1 should be reported as poisoned + let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) + .unwrap() + .unwrap(); + assert_eq!(report.0, reporter_addr); + assert_eq!(report.1, seq); + Ok(()) + }, + ) .unwrap() }) .unwrap(); @@ -3429,7 +3434,7 @@ fn test_contract_call_across_clarity_versions() { let contract = format!(" 
(impl-trait .chain-id-trait-v1.trait-v1) (impl-trait .chain-id-trait-v2.trait-v2) - + (use-trait chain-info-v1 .chain-id-trait-v1.trait-v1) (use-trait chain-info-v2 .chain-id-trait-v2.trait-v2) @@ -3468,7 +3473,7 @@ fn test_contract_call_across_clarity_versions() { ) ) (define-read-only (test-at-block-recursive) - (at-block 0x{} + (at-block 0x{} (begin ;; this only works in clarity2 (print {{ tenure: u{}, version: u2, chain: chain-id, func: \"test-at-block-func-recursive-v2\" }}) @@ -3547,7 +3552,7 @@ fn test_contract_call_across_clarity_versions() { let contract = format!(" (impl-trait .chain-id-trait-v1.trait-v1) (impl-trait .chain-id-trait-v2.trait-v2) - + (use-trait chain-info-v1 .chain-id-trait-v1.trait-v1) (use-trait chain-info-v2 .chain-id-trait-v2.trait-v2) @@ -3583,14 +3588,14 @@ fn test_contract_call_across_clarity_versions() { ) ) (define-read-only (test-at-block-recursive) - (at-block 0x{} + (at-block 0x{} (begin (print {{ tenure: u{}, version: u1, func: \"test-at-block-func-recursive-v1\" }}) (contract-call? .test-{} test-at-block-recursive) ) ) ) - + (define-read-only (get-call-count) (var-get call-count) ) @@ -3659,7 +3664,7 @@ fn test_contract_call_across_clarity_versions() { } } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let builder = StacksBlockBuilder::make_block_builder( &burnchain, @@ -3700,7 +3705,7 @@ fn test_contract_call_across_clarity_versions() { let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); peer.chainstate().with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { for tenure_id in 1..num_blocks { @@ -3919,7 +3924,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), contract_spends_too_much_tx.clone()] ) { assert_eq!(txid, contract_spends_too_much_txid); @@ -4096,7 +4101,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), spend_too_much.clone()] ) { assert_eq!(txid, spend_too_much.txid()); @@ -4146,7 +4151,7 @@ fn test_is_tx_problematic() { let err = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ); @@ -4198,7 +4203,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ) { assert_eq!(txid, runtime_checkerror_problematic.txid()); @@ -4229,7 +4234,7 @@ fn test_is_tx_problematic() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4392,7 +4397,7 @@ fn mempool_incorporate_pox_unlocks() { // this will be the height of the block that includes this new tenure let my_height = first_stacks_block_height + 1 + tenure_id; - let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), 
&parent_tip.index_block_hash(), |clarity_tx| { + let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &parent_tip.index_block_hash(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { let burn_block_height = db.get_current_burnchain_block_height().unwrap() as u64; let v1_unlock_height = db.get_v1_unlock_height(); @@ -4472,7 +4477,7 @@ fn mempool_incorporate_pox_unlocks() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4612,7 +4617,7 @@ fn test_fee_order_mismatch_nonce_order() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4746,6 +4751,7 @@ fn paramaterized_mempool_walk_test( 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut transaction_counter = 0; @@ -4778,6 +4784,7 @@ fn paramaterized_mempool_walk_test( &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -4794,7 +4801,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), &txid], ) .unwrap(); } else { @@ -4802,7 +4809,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, &txid], ) .unwrap(); } @@ -4826,7 +4833,6 @@ fn paramaterized_mempool_walk_test( .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index cc2fe940b14..4e1b774ba7c 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -150,7 +150,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -336,7 +336,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -483,7 +483,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -531,7 +531,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -820,7 +820,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -868,7 +868,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let 
mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1085,7 +1085,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1134,7 +1134,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1433,7 +1433,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1478,7 +1478,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1680,7 +1680,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1728,7 +1728,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1988,7 +1988,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2033,7 +2033,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2235,7 +2235,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2283,7 +2283,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -3117,7 +3117,7 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( microblocks.push(microblock); } - test_debug!("Produce anchored stacks block {} with smart contract and {} microblocks with contract call at burnchain height {} stacks height {}", + test_debug!("Produce anchored stacks block {} with smart contract and {} microblocks with contract call at burnchain height {} stacks height {}", stacks_block.block_hash(), microblocks.len(), burnchain_height, stacks_block.header.total_work.work); (stacks_block, microblocks) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 22a331b1936..cda74cb46d1 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1005,7 +1005,7 
@@ pub fn get_all_mining_rewards( block_height: u64, ) -> Vec<Vec<MinerReward>> { let mut ret = vec![]; - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); for i in 0..block_height { let block_rewards = @@ -1227,6 +1227,108 @@ pub fn make_versioned_user_contract_publish( sign_standard_singlesig_tx(payload, sender, nonce, tx_fee) } +pub fn sign_tx_order_independent_p2sh( + payload: TransactionPayload, + privks: &[StacksPrivateKey], + num_sigs: usize, + sender_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut pubks = vec![]; + for privk in privks.iter() { + pubks.push(StacksPublicKey::from_private(privk)); + } + let mut sender_spending_condition = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + num_sigs as u16, + pubks.clone(), + ) + .expect("Failed to create p2sh spending condition."); + sender_spending_condition.set_nonce(sender_nonce); + sender_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Standard(sender_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + unsigned_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = 0x80000000; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + + for signer in 0..num_sigs { + tx_signer.sign_origin(&privks[signer]).unwrap(); + } + + for signer in num_sigs..pubks.len() { + tx_signer.append_origin(&pubks[signer]).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +pub fn sign_tx_order_independent_p2wsh( + payload: TransactionPayload, + privks: &[StacksPrivateKey], + num_sigs: usize, + sender_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut pubks = vec![]; + for privk in privks.iter() { + pubks.push(StacksPublicKey::from_private(privk)); + } + let mut sender_spending_condition = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + num_sigs as u16, + pubks.clone(), + ) + .expect("Failed to create p2wsh spending condition."); + sender_spending_condition.set_nonce(sender_nonce); + sender_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Standard(sender_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + unsigned_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + unsigned_tx.chain_id = 0x80000000; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + + for signer in 0..num_sigs { + tx_signer.sign_origin(&privks[signer]).unwrap(); + } + + for signer in num_sigs..pubks.len() { + tx_signer.append_origin(&pubks[signer]).unwrap(); + } + + tx_signer.get_tx().unwrap() +} + +pub fn make_stacks_transfer_order_independent_p2sh( + privks: &[StacksPrivateKey], + num_sigs: usize, + nonce: u64, + tx_fee: u64, + recipient: &PrincipalData, + amount: u64, +) -> StacksTransaction { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + sign_tx_order_independent_p2sh(payload, privks, num_sigs, nonce, tx_fee) +} + +pub fn make_stacks_transfer_order_independent_p2wsh( + privks: &[StacksPrivateKey], + num_sigs: usize, + nonce: u64, + tx_fee: u64, + recipient: &PrincipalData, + amount: u64, +) -> StacksTransaction { + let payload = + TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); + sign_tx_order_independent_p2wsh(payload, 
privks, num_sigs, nonce, tx_fee) +} + pub fn make_user_contract_call( sender: &StacksPrivateKey, nonce: u64, @@ -1316,9 +1418,11 @@ pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAc let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let acct = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap(); Ok(acct) }) diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 4ede285e41c..2204f57a255 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -120,6 +120,7 @@ fn ClarityVersion_consensus_serialize( match *version { ClarityVersion::Clarity1 => write_next(fd, &1u8)?, ClarityVersion::Clarity2 => write_next(fd, &2u8)?, + ClarityVersion::Clarity3 => write_next(fd, &3u8)?, } Ok(()) } @@ -131,6 +132,7 @@ fn ClarityVersion_consensus_deserialize( match version_byte { 1u8 => Ok(ClarityVersion::Clarity1), 2u8 => Ok(ClarityVersion::Clarity2), + 3u8 => Ok(ClarityVersion::Clarity3), _ => Err(codec_error::DeserializeError(format!( "Unrecognized ClarityVersion byte {}", &version_byte @@ -686,19 +688,17 @@ impl StacksTransaction { ))); } }; + let tx = StacksTransaction { + version, + chain_id, + auth, + anchor_mode, + post_condition_mode, + post_conditions, + payload, + }; - Ok(( - StacksTransaction { - version, - chain_id, - auth, - anchor_mode, - post_condition_mode, - post_conditions, - payload, - }, - fd.num_read(), - )) + Ok((tx, fd.num_read())) } /// Try to convert to a coinbase payload @@ -873,6 +873,10 @@ impl StacksTransaction { privk, )?; match condition { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + cond.set_signature(next_sig); + Ok(next_sighash) + } TransactionSpendingCondition::Multisig(ref mut cond) => { cond.push_signature( if privk.compress_public() { @@ -884,9 +888,16 @@ ); Ok(next_sighash) } - TransactionSpendingCondition::Singlesig(ref mut cond) => { - cond.set_signature(next_sig); - Ok(next_sighash) + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_signature( + if privk.compress_public() { + TransactionPublicKeyEncoding::Compressed + } else { + TransactionPublicKeyEncoding::Uncompressed + }, + next_sig, + ); + Ok(*cur_sighash) + } } } @@ -897,6 +908,9 @@ ) -> Option<TransactionAuthField> { match condition { TransactionSpendingCondition::Multisig(ref mut cond) => cond.pop_auth_field(), + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.pop_auth_field() + } TransactionSpendingCondition::Singlesig(ref mut cond) => cond.pop_signature(), } } @@ -911,6 +925,10 @@ cond.push_public_key(pubkey.clone()); Ok(()) } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_public_key(pubkey.clone()); + Ok(()) + } _ => Err(net_error::SigningError( "Not a multisig condition".to_string(), )), @@ -1234,6 +1252,111 @@ mod test { use crate::net::codec::*; use crate::net::*; + impl StacksTransaction { + /// Sign a sighash without appending the signature and public key + /// to the given spending condition. 
+ /// Returns the resulting signature + fn sign_no_append_origin( + &self, + cur_sighash: &Txid, + privk: &StacksPrivateKey, + ) -> Result<MessageSignature, net_error> { + let next_sig = match self.auth { + TransactionAuth::Standard(ref origin_condition) + | TransactionAuth::Sponsored(ref origin_condition, _) => { + let (next_sig, _next_sighash) = TransactionSpendingCondition::next_signature( + cur_sighash, + &TransactionAuthFlags::AuthStandard, + origin_condition.tx_fee(), + origin_condition.nonce(), + privk, + )?; + next_sig + } + }; + Ok(next_sig) + } + + /// Appends a signature and public key to the spending condition. + fn append_origin_signature( + &mut self, + signature: MessageSignature, + key_encoding: TransactionPublicKeyEncoding, + ) -> Result<(), net_error> { + match self.auth { + TransactionAuth::Standard(ref mut origin_condition) + | TransactionAuth::Sponsored(ref mut origin_condition, _) => match origin_condition + { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + cond.set_signature(signature); + } + TransactionSpendingCondition::Multisig(ref mut cond) => { + cond.push_signature(key_encoding, signature); + } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + cond.push_signature(key_encoding, signature); + } + }, + }; + Ok(()) + } + + /// Sign a sighash as a sponsor without appending the signature and public key + /// to the given spending condition. + /// Returns the resulting signature + fn sign_no_append_sponsor( + &mut self, + cur_sighash: &Txid, + privk: &StacksPrivateKey, + ) -> Result<MessageSignature, net_error> { + let next_sig = match self.auth { + TransactionAuth::Standard(_) => { + return Err(net_error::SigningError( + "Cannot sign standard authorization with a sponsoring private key" + .to_string(), + )); + } + TransactionAuth::Sponsored(_, ref mut sponsor_condition) => { + let (next_sig, _next_sighash) = TransactionSpendingCondition::next_signature( + cur_sighash, + &TransactionAuthFlags::AuthSponsored, + sponsor_condition.tx_fee(), + sponsor_condition.nonce(), + privk, + )?; + next_sig + } + }; + Ok(next_sig) + } + + /// Appends a sponsor signature and public key to the spending condition. 
+ pub fn append_sponsor_signature( + &mut self, + signature: MessageSignature, + key_encoding: TransactionPublicKeyEncoding, + ) -> Result<(), net_error> { + match self.auth { + TransactionAuth::Standard(_) => Err(net_error::SigningError( + "Cannot append a public key to the sponsor of a standard auth condition" + .to_string(), + )), + TransactionAuth::Sponsored(_, ref mut sponsor_condition) => match sponsor_condition + { + TransactionSpendingCondition::Singlesig(ref mut cond) => { + Ok(cond.set_signature(signature)) + } + TransactionSpendingCondition::Multisig(ref mut cond) => { + Ok(cond.push_signature(key_encoding, signature)) + } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut cond) => { + Ok(cond.push_signature(key_encoding, signature)) + } + }, + } + } + } + fn corrupt_auth_field( corrupt_auth_fields: &TransactionAuth, i: usize, @@ -1264,6 +1387,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(ref pubkey) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` parameter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } } @@ -1289,6 +1426,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(_) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` parameter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } if corrupt_sponsor { @@ -1312,6 +1463,20 @@ mod test { }; data.fields[i] = corrupt_field } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + let corrupt_field = match data.fields[i] { + TransactionAuthField::PublicKey(ref pubkey) => { + TransactionAuthField::PublicKey(StacksPublicKey::from_hex("0270790e675116a63a75008832d82ad93e4332882ab0797b0f156de9d739160a0b").unwrap()) + } + TransactionAuthField::Signature(ref key_encoding, ref sig) => { + let mut sig_bytes = sig.as_bytes().to_vec(); + sig_bytes[1] ^= 1u8; // this breaks the `r` parameter + let corrupt_sig = MessageSignature::from_raw(&sig_bytes); + TransactionAuthField::Signature(*key_encoding, corrupt_sig) + } + }; + data.fields[i] = corrupt_field + } } } } @@ -1325,15 +1490,20 @@ mod test { TransactionSpendingCondition::Multisig(ref data) => { let mut j = 0; for f in 0..data.fields.len() { - match data.fields[f] { - TransactionAuthField::Signature(_, _) => { - j = f; - break; - } - _ => { - continue; - } - } + if matches!(data.fields[f], TransactionAuthField::Signature(..)) { + j = f; + break; + }; + } + j + } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut j = 0; + for f in 0..data.fields.len() { + if 
matches!(data.fields[f], TransactionAuthField::Signature(..)) { + j = f; + break; + }; } j } @@ -1346,15 +1516,20 @@ mod test { TransactionSpendingCondition::Multisig(ref data) => { let mut j = 0; for f in 0..data.fields.len() { - match data.fields[f] { - TransactionAuthField::PublicKey(_) => { - j = f; - break; - } - _ => { - continue; - } - } + if matches!(data.fields[f], TransactionAuthField::PublicKey(_)) { + j = f; + break; + }; + } + j + } + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + let mut j = 0; + for f in 0..data.fields.len() { + if matches!(data.fields[f], TransactionAuthField::PublicKey(_)) { + j = f; + break; + }; } j } @@ -1446,6 +1621,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } } } } @@ -1466,6 +1649,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } } } if corrupt_sponsor { @@ -1484,6 +1675,14 @@ mod test { MultisigHashMode::P2SH }; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.hash_mode = + if data.hash_mode == OrderIndependentMultisigHashMode::P2SH { + OrderIndependentMultisigHashMode::P2WSH + } else { + OrderIndependentMultisigHashMode::P2SH + }; + } } } } @@ -1504,6 +1703,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } }; } } @@ -1516,6 +1718,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } } } if corrupt_sponsor { @@ -1526,6 +1731,9 @@ mod test { TransactionSpendingCondition::Multisig(ref mut data) => { data.nonce += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + data.nonce += 1; + } } } } @@ -1566,6 +1774,10 @@ mod test { is_multisig_origin = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_origin = true; + data.signatures_required += 1; + } }; } } @@ -1577,6 +1789,10 @@ mod test { is_multisig_origin = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_origin = true; + data.signatures_required += 1; + } } } if corrupt_sponsor { @@ -1586,6 +1802,10 @@ mod test { is_multisig_sponsor = true; data.signatures_required += 1; } + TransactionSpendingCondition::OrderIndependentMultisig(ref mut data) => { + is_multisig_sponsor = true; + data.signatures_required += 1; + } } } } @@ -3649,6 +3869,7 @@ mod test { 0, &TransactionAnchorMode::OnChainOnly, &TransactionPostConditionMode::Deny, + StacksEpochId::latest(), ); for tx in all_txs.iter() { let mut tx_bytes = vec![ @@ -3849,6 +4070,17 @@ mod test { assert_eq!(txid_before, signed_tx.txid()); } + fn is_order_independent_multisig(tx: &StacksTransaction) -> bool { + let spending_condition = match &tx.auth { + TransactionAuth::Standard(origin) => origin, + TransactionAuth::Sponsored(_, 
sponsor) => sponsor, + }; + match spending_condition { + TransactionSpendingCondition::OrderIndependentMultisig(..) => true, + _ => false, + } + } + fn check_oversign_origin_multisig(signed_tx: &StacksTransaction) -> () { let tx = signed_tx.clone(); let privk = StacksPrivateKey::from_hex( @@ -3865,7 +4097,14 @@ mod test { Ok(_) => assert!(false), Err(e) => match e { net_error::VerifyingError(msg) => { - assert_eq!(&msg, "Incorrect number of signatures") + if is_order_independent_multisig(&oversigned_tx) { + assert!( + msg.contains("Signer hash does not equal hash of public key(s)"), + "{msg}" + ) + } else { + assert_eq!(&msg, "Incorrect number of signatures") + } } _ => assert!(false), }, @@ -3922,7 +4161,14 @@ mod test { Ok(_) => assert!(false), Err(e) => match e { net_error::VerifyingError(msg) => { - assert_eq!(&msg, "Incorrect number of signatures") + if is_order_independent_multisig(&oversigned_tx) { + assert!( + msg.contains("Signer hash does not equal hash of public key(s)"), + "{msg}" + ) + } else { + assert_eq!(&msg, "Incorrect number of signatures") + } } _ => assert!(false), }, @@ -4066,7 +4312,7 @@ mod test { sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); @@ -4176,7 +4422,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap() + bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap(), } ); @@ -4261,14 +4507,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap() + bytes: Hash160::from_hex("693cd53eb47d4749762d7cfaf46902bda5be5f97").unwrap(), } ); @@ -4376,7 +4622,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap() + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), } ); @@ -4486,14 +4732,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap() + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), } ); @@ -4614,7 +4860,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap() + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), } ); @@ -4727,14 +4973,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: 
Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap() + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), } ); @@ -4853,7 +5099,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap() + bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap(), } ); @@ -4963,14 +5209,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap() + bytes: Hash160::from_hex("2136367c9c740e7dbed8795afdf8a6d273096718").unwrap(), } ); @@ -5076,7 +5322,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap() + bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap(), } ); @@ -5157,14 +5403,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap() + bytes: Hash160::from_hex("f15fa5c59d14ffcb615fa6153851cd802bb312d2").unwrap(), } ); @@ -5268,7 +5514,7 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap() + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), } ); @@ -5379,14 +5625,14 @@ mod test { origin_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap() + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), } ); assert_eq!( sponsor_address, StacksAddress { version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, - bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap() + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), } ); @@ -5475,6 +5721,2972 @@ mod test { } } - // TODO(test): test with different tx versions - // TODO(test): test error values for signing and verifying + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = 
StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_extra_signers() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + 
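+ // The signatures here all commit to the same initial sighash, so they can be produced in any order (or in parallel); a legacy `Multisig` condition instead chains each signature into the next sighash, forcing a strict signing order.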
let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + //check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 3); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + 
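+ // The origin signs against the placeholder condition built from `random_sponsor`; the real order-independent sponsor condition is swapped in afterwards via `set_sponsor`, so the origin never needs the sponsor's actual keys.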
tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_uncompressed() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + 
TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Uncompressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_uncompressed() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + 
version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + .append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_mixed() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2sh_mixed_3_out_of_9() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + let privk_7 = StacksPrivateKey::from_hex( + "068856c242bfebdc57700fa598fae4e8ebb6b5f6bf932177018071489737d3ff01", + ) + .unwrap(); + let privk_8 = StacksPrivateKey::from_hex( + "a07a397f6b31c803f5d7f0c4620576cb03c66c12cdbdb6cd91d001d6f0052de201", + ) + .unwrap(); + let privk_9 = StacksPrivateKey::from_hex( + "f395129abc42c57e394dcceebeca9f51f0cb0a3f1c3a899d62e40b9340c7cc1101", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = StacksPublicKey::from_private(&privk_6); + let pubk_7 = StacksPublicKey::from_private(&privk_7); + let pubk_8 = StacksPublicKey::from_private(&privk_8); + let pubk_9 = StacksPublicKey::from_private(&privk_9); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 3, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + pubk_7.clone(), + pubk_8.clone(), + pubk_9.clone(), + ], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("315d672961ef2583faf4107ab4ec5566014c867c").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + 
.sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + let sig9 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_9) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_4); + let _ = tx.append_next_origin(&pubk_5); + let _ = tx.append_next_origin(&pubk_6); + let _ = tx.append_next_origin(&pubk_7); + let _ = tx.append_next_origin(&pubk_8); + let _ = tx.append_origin_signature(sig9, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 3); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 9); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_public_key()); + assert!(data.fields[5].is_public_key()); + assert!(data.fields[6].is_public_key()); + assert!(data.fields[7].is_public_key()); + assert!(data.fields[8].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!(data.fields[4].as_public_key().unwrap(), pubk_5); + assert_eq!(data.fields[5].as_public_key().unwrap(), pubk_6); + assert_eq!(data.fields[6].as_public_key().unwrap(), pubk_7); + assert_eq!(data.fields[7].as_public_key().unwrap(), pubk_8); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[8].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_mixed() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = 
TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2sh_mixed_5_out_of_5() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 5, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + ], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("fc29d14be615b0f72a66b920040c2b5b8124990b").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + 
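+ // The sponsor's 5-of-5 order-independent condition likewise starts unsigned; all five signatures below are produced from one sighash and appended in key order.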
assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + let sig4 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_4) + .unwrap(); + let sig5 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_5) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig4, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig5, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 5); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 5); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_signature()); + assert!(data.fields[4].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[3].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[4].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2wsh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_order_independent_p2wsh_4_out_of_6() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = StacksPublicKey::from_private(&privk_6); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 4, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + ], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("e2a4ae14ffb0a4a0982a06d07b97d57268d2bf94").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + let sig6 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_6) + .unwrap(); + let sig5 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_5) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_4); + let _ = tx.append_origin_signature(sig5, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_origin_signature(sig6, TransactionPublicKeyEncoding::Compressed); 
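+            // All four signatures above were produced over the same
+            // `initial_sig_hash`, which is what makes this spending condition
+            // order-independent: sig3 and sig6 were created before sig1 was
+            // appended, yet the fields only need to be appended in the key
+            // order of the condition (pubk_1 through pubk_6). A sketch of the
+            // same flow generalized, assuming a hypothetical
+            // `participants: &[(StacksPrivateKey, bool)]` slice in key order,
+            // where the flag marks which members actually sign:
+            //
+            //     let sighash = tx.sign_begin();
+            //     for (privk, signs) in participants {
+            //         if *signs {
+            //             let sig = tx.sign_no_append_origin(&sighash, privk).unwrap();
+            //             let _ = tx.append_origin_signature(sig, TransactionPublicKeyEncoding::Compressed);
+            //         } else {
+            //             let _ = tx.append_next_origin(&StacksPublicKey::from_private(privk));
+            //         }
+            //     }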
+ + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 4); + + // auth is standard; the first, third, fifth, and sixth fields are signatures for compressed keys, + // and the second and fourth fields are the second and fourth public keys + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 6); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_signature()); + assert!(data.fields[5].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[4].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[5].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2wsh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); +
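+            // The fee (123) and sponsor nonce (456, set on the next line) are
+            // placeholders only the origin sees: the origin's signature is
+            // computed with the sponsor condition reduced to a signing
+            // sentinel, so the real sponsor can overwrite both values (456 and
+            // 789 below) without invalidating the origin's signature.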
tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_order_independent_p2wsh_2_out_of_7() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + let privk_4 = StacksPrivateKey::from_hex( + "3beb8916404874f5d5de162c95470951de5b4a7f6ec8d7a20511551821f16db501", + ) + .unwrap(); + let privk_5 = StacksPrivateKey::from_hex( + "601aa0939e98efec29a4dc645377c9d4acaa0b7318444ec8fd7d090d0b36d85b01", + ) + .unwrap(); + let privk_6 = StacksPrivateKey::from_hex( + "5a4ca3db5a3b36bc32d9f2f0894435cbc4b2b1207e95ee283616d9a0797210da01", + ) + .unwrap(); + let privk_7 = StacksPrivateKey::from_hex( + "068856c242bfebdc57700fa598fae4e8ebb6b5f6bf932177018071489737d3ff01", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + let pubk_4 = StacksPublicKey::from_private(&privk_4); + let pubk_5 = StacksPublicKey::from_private(&privk_5); + let pubk_6 = StacksPublicKey::from_private(&privk_6); + let pubk_7 = StacksPublicKey::from_private(&privk_7); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![ + pubk_1.clone(), + pubk_2.clone(), + pubk_3.clone(), + pubk_4.clone(), + pubk_5.clone(), + pubk_6.clone(), + pubk_7.clone(), + ], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: 
Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("e3001c2b12f24ba279116d7001e3bd82b2b5eab4").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig7 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_7) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = origin_tx.append_next_sponsor(&pubk_3); + let _ = origin_tx.append_next_sponsor(&pubk_4); + let _ = origin_tx.append_next_sponsor(&pubk_5); + let _ = origin_tx.append_next_sponsor(&pubk_6); + let _ = + origin_tx.append_sponsor_signature(sig7, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 7); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_public_key()); + assert!(data.fields[3].is_public_key()); + assert!(data.fields[4].is_public_key()); + assert!(data.fields[5].is_public_key()); + assert!(data.fields[6].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[6].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + assert_eq!(data.fields[3].as_public_key().unwrap(), pubk_4); + assert_eq!(data.fields[4].as_public_key().unwrap(), pubk_5); + assert_eq!(data.fields[5].as_public_key().unwrap(), pubk_6); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2sh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + let order_independent_origin_address = + order_independent_origin_auth.origin().address_mainnet(); + + assert_eq!(origin_address, order_independent_origin_address); + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + assert_eq!(txs.len(), order_independent_txs.len()); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + 
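+            // Legacy `Multisig` signing is strictly sequential: each
+            // `sign_origin` call above commits to the sighash produced by the
+            // previous signature, so privk_2 cannot sign before privk_1.
+            // Conceptually (a simplification of the real pre/post-sign hashing):
+            //
+            //     sighash_1 = H(sighash_0, sig_1)    // what privk_2 must sign
+            //     sighash_2 = H(sighash_1, sig_2)
+            //
+            // The order-independent loop below signs the same payload with the
+            // same keys, but every signature is over the initial sighash, and
+            // both spending conditions hash to the same multisig address.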
assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + check_oversign_origin_multisig(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut order_independent_tx in order_independent_txs { + assert_eq!(order_independent_tx.auth().origin().num_signatures(), 0); + + let order_independent_initial_sig_hash = order_independent_tx.sign_begin(); + let sig3 = order_independent_tx + .sign_no_append_origin(&order_independent_initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = order_independent_tx + .sign_no_append_origin(&order_independent_initial_sig_hash, &privk_2) + .unwrap(); + + let _ = order_independent_tx.append_next_origin(&pubk_1); + let _ = order_independent_tx + .append_origin_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = order_independent_tx + .append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut order_independent_tx); + check_sign_no_sponsor(&mut order_independent_tx); + + assert_eq!(order_independent_tx.auth().origin().num_signatures(), 2); + + match order_independent_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&order_independent_tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2sh_uncompressed() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + 
"d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + let order_independent_origin_address = + order_independent_origin_auth.origin().address_mainnet(); + assert_eq!(origin_address, order_independent_origin_address); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + assert_eq!(txs.len(), order_independent_txs.len()); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_multisig(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for uncompressed keys. 
+ // third field is the third public key + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig2 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = tx.append_next_origin(&pubk_1); + let _ = tx.append_origin_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Uncompressed); + + check_oversign_origin_multisig(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard; the first field is the first public key, + // and the second and third fields are signatures for uncompressed keys + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_public_key()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[0].as_public_key().unwrap(), pubk_1); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_standard_both_multisig_p2wsh() { + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let order_independent_origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(), + ); + + let origin_address = origin_auth.origin().address_mainnet(); + let
order_independent_origin_address = + order_independent_origin_auth.origin().address_mainnet(); + assert_eq!(origin_address, order_independent_origin_address); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&origin_auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&order_independent_origin_auth); + + for tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&privk_1).unwrap(); + tx_signer.sign_origin(&privk_2).unwrap(); + tx_signer.append_origin(&pubk_3).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_multisig(&mut signed_tx); + check_oversign_origin_multisig_uncompressed(&mut signed_tx); + check_sign_no_sponsor(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match signed_tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + + let tx_signer = StacksTransactionSigner::new(&tx); + + let initial_sig_hash = tx.sign_begin(); + let sig3 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_3) + .unwrap(); + let sig1 = tx + .sign_no_append_origin(&initial_sig_hash, &privk_1) + .unwrap(); + + let _ = tx.append_origin_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = tx.append_next_origin(&pubk_2); + let _ = tx.append_origin_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + check_oversign_origin_multisig(&mut tx); + check_oversign_origin_multisig_uncompressed(&mut tx); + check_sign_no_sponsor(&mut tx); + + assert_eq!(tx.auth().origin().num_signatures(), 2); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match tx.auth { + TransactionAuth::Standard(ref origin) => match origin { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, origin_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + } + _ => assert!(false), + }, + _ => assert!(false), + }; + + test_signature_and_corruption(&tx, true, false); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2sh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("a23ea89d6529ac48ac766f720e480beec7f19273").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + 
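+            // Handoff point: the origin signed above while seeing only the
+            // throwaway `random_sponsor` p2pkh condition. The sponsor now takes
+            // the incomplete tx, swaps in its real multisig spending condition,
+            // and sets the fee and nonce it will actually pay with, before
+            // resuming the signer to add its own signatures.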
origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + tx_signer.resume(&origin_tx); + + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_singlesig(&mut signed_tx); + check_oversign_sponsor_multisig(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 1); + assert_eq!(signed_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match signed_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + test_signature_and_corruption(&signed_tx, false, true); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = + origin_tx.append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + 
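+            // `tx` is the origin's original copy; mirroring the sponsor's final
+            // fee (456) and nonce (789) onto it lets the field-by-field
+            // equality asserts below compare it directly against `origin_tx`.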
check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2sh_uncompressed() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e0", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d2", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2sh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = 
auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("73a8b4a751a678fe83e9d35ce301371bb3d397f7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + .append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig2 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_2) + .unwrap(); + + let _ = origin_tx + .append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx + .append_sponsor_signature(sig2, TransactionPublicKeyEncoding::Uncompressed); + let _ = origin_tx.append_next_sponsor(&pubk_3); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. 
+ // third field is the third public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Uncompressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } + + #[test] + fn tx_stacks_transaction_sign_verify_sponsored_both_multisig_p2wsh() { + let origin_privk = StacksPrivateKey::from_hex( + "807bbe9e471ac976592cc35e3056592ecc0f778ee653fced3b491a122dd8d59701", + ) + .unwrap(); + + let privk_1 = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let privk_2 = StacksPrivateKey::from_hex( + "2a584d899fed1d24e26b524f202763c8ab30260167429f157f1c119f550fa6af01", + ) + .unwrap(); + let privk_3 = StacksPrivateKey::from_hex( + "d5200dee706ee53ae98a03fba6cf4fdcc5084c30cfa9e1b3462dcdeaa3e0f1d201", + ) + .unwrap(); + + let pubk_1 = StacksPublicKey::from_private(&privk_1); + let pubk_2 = StacksPublicKey::from_private(&privk_2); + let pubk_3 = StacksPublicKey::from_private(&privk_3); + + let random_sponsor = StacksPrivateKey::new(); // what the origin sees + + let auth = TransactionAuth::Sponsored( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &origin_privk, + )) + .unwrap(), + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &random_sponsor, + )) + .unwrap(), + ); + + let real_sponsor = TransactionSpendingCondition::new_multisig_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let real_order_independent_sponsor = + TransactionSpendingCondition::new_multisig_order_independent_p2wsh( + 2, + vec![pubk_1.clone(), pubk_2.clone(), pubk_3.clone()], + ) + .unwrap(); + + let origin_address = auth.origin().address_mainnet(); + let sponsor_address = real_sponsor.address_mainnet(); + let order_independent_sponsor_address = real_order_independent_sponsor.address_mainnet(); + + assert_eq!( + origin_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + bytes: Hash160::from_hex("3597aaa4bde720be93e3829aae24e76e7fcdfd3e").unwrap(), + } + ); + assert_eq!(sponsor_address, order_independent_sponsor_address); + + assert_eq!( + sponsor_address, + StacksAddress { + version: C32_ADDRESS_VERSION_MAINNET_MULTISIG, + bytes: Hash160::from_hex("f5cfb61a07fb41a32197da01ce033888f0fe94a7").unwrap(), + } + ); + + let txs = tx_stacks_transaction_test_txs(&auth); + let order_independent_txs = tx_stacks_transaction_test_txs(&auth); // no difference + + for mut tx in txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 
0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx.auth.set_sponsor(real_sponsor.clone()).unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + tx_signer.resume(&origin_tx); + + tx_signer.sign_sponsor(&privk_1).unwrap(); + tx_signer.sign_sponsor(&privk_2).unwrap(); + tx_signer.append_sponsor(&pubk_3).unwrap(); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + let mut signed_tx = tx_signer.get_tx().unwrap(); + + check_oversign_origin_singlesig(&mut signed_tx); + check_oversign_sponsor_multisig(&mut signed_tx); + check_oversign_sponsor_multisig_uncompressed(&mut signed_tx); + + assert_eq!(signed_tx.auth().origin().num_signatures(), 1); + assert_eq!(signed_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and signed_tx are otherwise equal + assert_eq!(tx.version, signed_tx.version); + assert_eq!(tx.chain_id, signed_tx.chain_id); + assert_eq!(tx.get_tx_fee(), signed_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), signed_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), signed_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, signed_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, signed_tx.post_condition_mode); + assert_eq!(tx.post_conditions, signed_tx.post_conditions); + assert_eq!(tx.payload, signed_tx.payload); + + // auth is standard and first two auth fields are signatures for compressed keys. + // third field is the third public key + match signed_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::Multisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_signature()); + assert!(data.fields[2].is_public_key()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[1].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[2].as_public_key().unwrap(), pubk_3); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&signed_tx, true, false); + test_signature_and_corruption(&signed_tx, false, true); + } + + for mut tx in order_independent_txs { + assert_eq!(tx.auth().origin().num_signatures(), 0); + assert_eq!(tx.auth().sponsor().unwrap().num_signatures(), 0); + + tx.set_tx_fee(123); + tx.set_sponsor_nonce(456).unwrap(); + let mut tx_signer = StacksTransactionSigner::new(&tx); + + tx_signer.sign_origin(&origin_privk).unwrap(); + + // sponsor sets and pays fee after origin signs + let mut origin_tx = tx_signer.get_tx_incomplete(); + origin_tx + .auth + .set_sponsor(real_order_independent_sponsor.clone()) + .unwrap(); + origin_tx.set_tx_fee(456); + origin_tx.set_sponsor_nonce(789).unwrap(); + + let initial_sig_hash = tx_signer.sighash; + let sig1 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_1) + .unwrap(); + let sig3 = origin_tx + .sign_no_append_sponsor(&initial_sig_hash, &privk_3) 
+ .unwrap(); + + let _ = + origin_tx.append_sponsor_signature(sig1, TransactionPublicKeyEncoding::Compressed); + let _ = origin_tx.append_next_sponsor(&pubk_2); + let _ = + origin_tx.append_sponsor_signature(sig3, TransactionPublicKeyEncoding::Compressed); + + tx.set_tx_fee(456); + tx.set_sponsor_nonce(789).unwrap(); + + check_oversign_origin_singlesig(&mut origin_tx); + check_oversign_sponsor_multisig(&mut origin_tx); + check_oversign_sponsor_multisig_uncompressed(&mut origin_tx); + + assert_eq!(origin_tx.auth().origin().num_signatures(), 1); + assert_eq!(origin_tx.auth().sponsor().unwrap().num_signatures(), 2); + + // tx and origin_tx are otherwise equal + assert_eq!(tx.version, origin_tx.version); + assert_eq!(tx.chain_id, origin_tx.chain_id); + assert_eq!(tx.get_tx_fee(), origin_tx.get_tx_fee()); + assert_eq!(tx.get_origin_nonce(), origin_tx.get_origin_nonce()); + assert_eq!(tx.get_sponsor_nonce(), origin_tx.get_sponsor_nonce()); + assert_eq!(tx.anchor_mode, origin_tx.anchor_mode); + assert_eq!(tx.post_condition_mode, origin_tx.post_condition_mode); + assert_eq!(tx.post_conditions, origin_tx.post_conditions); + assert_eq!(tx.payload, origin_tx.payload); + + // auth is sponsored; the first and third sponsor fields are signatures for compressed keys, + // and the second field is the second public key + match origin_tx.auth { + TransactionAuth::Sponsored(ref origin, ref sponsor) => { + match origin { + TransactionSpendingCondition::Singlesig(ref data) => { + assert_eq!(data.key_encoding, TransactionPublicKeyEncoding::Compressed); + assert_eq!(data.signer, origin_address.bytes); + } + _ => assert!(false), + } + match sponsor { + TransactionSpendingCondition::OrderIndependentMultisig(ref data) => { + assert_eq!(data.signer, sponsor_address.bytes); + assert_eq!(data.fields.len(), 3); + assert!(data.fields[0].is_signature()); + assert!(data.fields[1].is_public_key()); + assert!(data.fields[2].is_signature()); + + assert_eq!( + data.fields[0].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!( + data.fields[2].as_signature().unwrap().0, + TransactionPublicKeyEncoding::Compressed + ); + assert_eq!(data.fields[1].as_public_key().unwrap(), pubk_2); + } + _ => assert!(false), + } + } + _ => assert!(false), + }; + + test_signature_and_corruption(&origin_tx, true, false); + test_signature_and_corruption(&origin_tx, false, true); + } + } } diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index c14e14aad40..21cf55dea6d 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -24,7 +24,7 @@ use clarity::vm::coverage::CoverageReporter; use lazy_static::lazy_static; use rand::Rng; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, Row, Transaction, NO_PARAMS}; +use rusqlite::{Connection, OpenFlags, Row, Transaction}; use serde::Serialize; use serde_json::json; use stacks_common::address::c32::c32_address; @@ -33,6 +33,7 @@ use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, VRFSeed, *, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{bytes_to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::{get_epoch_time_ms, log}; @@ -105,7 +106,6 @@ macro_rules!
panic_test { }; } -#[cfg_attr(tarpaulin, skip)] fn print_usage(invoked_by: &str) { eprintln!( "Usage: {} [command] @@ -128,7 +128,6 @@ where command is one of: panic_test!() } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect(input: Result, msg: &str) -> A { input.unwrap_or_else(|e| { eprintln!("{}\nCaused by: {}", msg, e); @@ -136,7 +135,6 @@ fn friendly_expect(input: Result, msg: &str) -> A }) } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect_opt(input: Option, msg: &str) -> A { input.unwrap_or_else(|| { eprintln!("{}", msg); @@ -655,7 +653,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // mock it let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { @@ -666,7 +668,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { // mock it, but make it unique @@ -681,6 +687,7 @@ impl HeadersDB for CLIHeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { @@ -694,7 +701,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { Some((height * 600 + 1231006505) as u64) @@ -703,6 +714,15 @@ impl HeadersDB for CLIHeadersDB { } } + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + let conn = self.conn(); + if let Some(height) = get_cli_block_height(&conn, id_bhh) { + Some((height * 10 + 1713799973) as u64) + } else { + None + } + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { @@ -712,21 +732,37 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 2000) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 1000) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 3000) } diff --git 
a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index ac764e0e91a..c89679f4145 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -638,8 +638,7 @@ impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { where F: FnOnce(ClarityDatabase) -> (R, ClarityDatabase), { - let mut db = - ClarityDatabase::new(&mut self.datastore, &self.header_db, &self.burn_state_db); + let mut db = ClarityDatabase::new(&mut self.datastore, self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -672,7 +671,7 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { { let mut db = self .datastore - .as_clarity_db(&self.header_db, &self.burn_state_db); + .as_clarity_db(self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -1528,8 +1527,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; - let header_db = &self.header_db; - let burn_state_db = &self.burn_state_db; + let header_db = self.header_db; + let burn_state_db = self.burn_state_db; let mainnet = self.mainnet; let chain_id = self.chain_id; let mut log = RollbackWrapperPersistedLog::new(); @@ -1608,8 +1607,8 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); let (r, mut db) = to_do(db); @@ -1673,8 +1672,8 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); // wrap the whole contract-call in a claritydb transaction, @@ -1741,8 +1740,8 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); @@ -1888,9 +1887,9 @@ mod tests { use clarity::vm::database::{ClarityBackingStore, STXBalance}; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; use clarity::vm::types::{StandardPrincipalData, Value}; - use rusqlite::NO_PARAMS; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::ConsensusHash; + use stacks_common::types::sqlite::NO_PARAMS; use super::*; use crate::chainstate::stacks::index::ClarityMarfTrieId; @@ -2691,6 +2690,14 @@ mod tests { pub struct BlockLimitBurnStateDB {} impl BurnStateDB for BlockLimitBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 3e4088b6eb9..fed0e70e95c 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -1,7 +1,12 @@ use std::path::PathBuf; use std::str::FromStr; +use 
clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; +use clarity::vm::database::sqlite::{ + sqlite_get_contract_hash, sqlite_get_metadata, sqlite_get_metadata_manual, + sqlite_insert_metadata, +}; use clarity::vm::database::{ BurnStateDB, ClarityBackingStore, ClarityDatabase, HeadersDB, SpecialCaseHandler, SqliteConnection, @@ -451,6 +456,39 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } impl<'a> WritableMarfStore<'a> { @@ -692,4 +730,37 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .insert_batch(&keys, values) .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure".into()).into()) } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index c9c21957f3b..81f0bac43c3 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1,16 +1,22 @@ use std::ops::{Deref, DerefMut}; +use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; +use clarity::vm::database::sqlite::{ + sqlite_get_contract_hash, sqlite_get_metadata, sqlite_get_metadata_manual, + sqlite_insert_metadata, +}; use clarity::vm::database::{ BurnStateDB, ClarityBackingStore, ClarityDatabase, HeadersDB, SpecialCaseHandler, SqliteConnection, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; use clarity::vm::errors::{InterpreterResult, RuntimeErrorType}; -use clarity::vm::types::{PrincipalData, TupleData}; -use rusqlite::{Connection, OptionalExtension, Row, ToSql}; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; +use rusqlite::types::ToSql; +use rusqlite::{params, Connection, OptionalExtension, Row}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, - VRFSeed, + TenureBlockId, VRFSeed, }; use stacks_common::types::Address; use 
stacks_common::util::vrf::VRFProof; @@ -19,55 +25,162 @@ use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, get_ancestor_sort_id_tx, SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, SortitionHandleTx, }; +use crate::chainstate::nakamoto::{keys as nakamoto_keys, NakamotoChainState, StacksDBIndexed}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ - ChainstateTx, MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, + ChainstateTx, MinerPaymentSchedule, StacksChainState, StacksDBConn, StacksDBTx, + StacksHeaderInfo, }; use crate::chainstate::stacks::index::marf::{MarfConnection, MARF}; use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId, TrieMerkleProof}; use crate::chainstate::stacks::Error as ChainstateError; use crate::clarity_vm::special::handle_contract_call_special_cases; use crate::core::{StacksEpoch, StacksEpochId}; -use crate::util_lib::db::{DBConn, FromColumn, FromRow}; +use crate::util_lib::db::{DBConn, Error as DBError, FromColumn, FromRow}; pub mod marf; -pub struct HeadersDBConn<'a>(pub &'a Connection); +pub trait GetTenureStartId { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + fn conn(&self) -> &Connection; +} + +impl GetTenureStartId for StacksDBConn<'_> { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten() + .map(|block_id| TenureBlockId::from(block_id))) + } + + fn conn(&self) -> &Connection { + self.sqlite() + } +} + +impl GetTenureStartId for StacksDBTx<'_> { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed_ref( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? 
+ .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten() + .map(|block_id| TenureBlockId::from(block_id))) + } + + fn conn(&self) -> &Connection { + self.sqlite() + } +} + +impl GetTenureStartId for MARF { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + let dbconn = StacksDBConn::new(self, ()); + dbconn.get_tenure_block_id(tip, tenure_id_consensus_hash) + } + + fn conn(&self) -> &Connection { + self.sqlite_conn() + } +} + +pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); impl<'a> HeadersDB for HeadersDBConn<'a> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.0, id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.0.conn(), + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( &self, id_bhh: &StacksBlockId, ) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_hash", |r| { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_hash", |r| { BurnchainHeaderHash::from_row(r).expect("FATAL: malformed burn_header_hash") }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - }) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.0.conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.0.conn(), + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } + } + + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column_from_table( + self.0.conn(), + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_height", |r| { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_height", |r| { u64::from_row(r) .expect("FATAL: malformed burn_header_height") .try_into() @@ -75,37 +188,79 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) 
- }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.0.conn(), + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column(self.0.conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "burnchain_sortition_burn", |r| { - u64::from_row(r).expect("FATAL: malformed sortition burn") - }) + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column( + self.0.conn(), + &tenure_id_bhh, + "burnchain_sortition_burn", + |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), + ) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column( + self.0.conn(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.0, id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_matured_reward(&self.0, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -113,10 +268,17 @@ impl<'a> HeadersDB for ChainstateTx<'a> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( @@ -128,16 +290,48 @@ impl<'a> HeadersDB for ChainstateTx<'a> { }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - 
}) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.deref().deref(), id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } + } + + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -149,40 +343,79 @@ impl<'a> HeadersDB for ChainstateTx<'a> { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) - }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.deref().deref(), + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.deref().deref(), id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); + get_miner_column(self.deref().deref(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_miner_column( self.deref().deref(), - id_bhh, + &tenure_id_bhh, "burnchain_sortition_burn", |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), ) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.deref().deref(), id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + fn 
get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); + get_miner_column( + self.deref().deref(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.deref().deref(), id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); + get_matured_reward(self.deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -190,10 +423,17 @@ impl HeadersDB for MARF { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( @@ -205,16 +445,48 @@ impl HeadersDB for MARF { }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - }) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.sqlite_conn(), id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } + } + + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -226,78 +498,103 @@ impl HeadersDB for MARF { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) - }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = 
get_first_block_in_tenure(self, id_bhh, Some(epoch)); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.sqlite_conn(), + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.sqlite_conn(), id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); + get_miner_column(self.sqlite_conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_miner_column( self.sqlite_conn(), - id_bhh, + &tenure_id_bhh, "burnchain_sortition_burn", |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), ) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.sqlite_conn(), id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); + get_miner_column( + self.sqlite_conn(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.sqlite_conn(), id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); + get_matured_reward(self, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } -fn get_stacks_header_column( +/// Select a specific column from the headers table, specifying whether to use +/// the original block headers table or the Nakamoto block headers table. 
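+///
+/// An illustrative call (the loader closure here is only a sketch, not taken
+/// from this patch): reading `burn_header_height` for a pre-Nakamoto block
+/// would look like
+/// `get_stacks_header_column_from_table(conn, &id_bhh, "burn_header_height",
+/// &|r| u64::from_row(r).expect("malformed height"), false)`, while passing
+/// `true` for the final flag reads the same column from
+/// `nakamoto_block_headers` instead.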
+pub fn get_stacks_header_column_from_table( conn: &DBConn, id_bhh: &StacksBlockId, column_name: &str, - loader: F, + loader: &F, + nakamoto: bool, ) -> Option where F: Fn(&Row) -> R, { - let args: &[&dyn ToSql] = &[id_bhh]; - if let Some(result) = conn - .query_row( - &format!( - "SELECT {} FROM block_headers WHERE index_block_hash = ?", - column_name - ), - args, - |x| Ok(loader(x)), - ) - .optional() - .unwrap_or_else(|_| { - panic!( - "Unexpected SQL failure querying block header table for '{}'", - column_name - ) - }) - { - return Some(result); - } - // if nothing was found in `block_headers`, try `nakamoto_block_headers` + let args = params![id_bhh]; + let table_name = if nakamoto { + "nakamoto_block_headers" + } else { + "block_headers" + }; + conn.query_row( - &format!( - "SELECT {} FROM nakamoto_block_headers WHERE index_block_hash = ?", - column_name - ), + &format!("SELECT {column_name} FROM {table_name} WHERE index_block_hash = ?",), args, |x| Ok(loader(x)), ) @@ -310,16 +607,84 @@ where }) } -fn get_miner_column( +fn get_stacks_header_column( conn: &DBConn, id_bhh: &StacksBlockId, column_name: &str, loader: F, ) -> Option +where + F: Fn(&Row) -> R, +{ + match get_stacks_header_column_from_table(conn, id_bhh, column_name, &loader, false) { + Some(x) => Some(x), + None => get_stacks_header_column_from_table(conn, id_bhh, column_name, &loader, true), + } +} + +fn get_first_block_in_tenure( + conn: >S, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, +) -> TenureBlockId { + let consensus_hash = match epoch_opt { + Some(epoch) => { + if !epoch.uses_nakamoto_blocks() { + return id_bhh.clone().into(); + } else { + get_stacks_header_column_from_table( + conn.conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + true, + ) + } + } + None => { + if let Some(_) = get_stacks_header_column_from_table( + conn.conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + false, + ) { + return id_bhh.clone().into(); + } else { + get_stacks_header_column_from_table( + conn.conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + true, + ) + } + } + }; + + // SAFETY: if we reach this point, then `id_bhh` is a Nakamoto block and has a well-defined + // tenure-start block ID. + let ch = consensus_hash + .expect("Unexpected SQL failure querying block header table for 'consensus_hash'"); + + let tenure_start_id: TenureBlockId = conn + .get_tenure_block_id(id_bhh, &ch) + .expect("FATAL: failed to query DB for tenure-start block") + .expect("FATAL: no tenure start block for Nakamoto block"); + + tenure_start_id +} + +fn get_miner_column( + conn: &DBConn, + id_bhh: &TenureBlockId, + column_name: &str, + loader: F, +) -> Option where F: FnOnce(&Row) -> R, { - let args: &[&dyn ToSql] = &[id_bhh]; + let args = params![id_bhh.0]; conn.query_row( &format!( "SELECT {} FROM payments WHERE index_block_hash = ? 
AND miner = 1", @@ -337,11 +702,21 @@ where }) } -fn get_matured_reward(conn: &DBConn, child_id_bhh: &StacksBlockId) -> Option { +fn get_matured_reward( + conn: >S, + child_id_bhh: &TenureBlockId, + epoch: &StacksEpochId, +) -> Option { + let table_name = if epoch.uses_nakamoto_blocks() { + "nakamoto_block_headers" + } else { + "block_headers" + }; let parent_id_bhh = conn + .conn() .query_row( - "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?", - [child_id_bhh].iter(), + &format!("SELECT parent_block_id FROM {table_name} WHERE index_block_hash = ?"), + params![child_id_bhh.0], |x| { Ok(StacksBlockId::from_column(x, "parent_block_id") .expect("Bad parent_block_id in database")) @@ -351,7 +726,8 @@ fn get_matured_reward(conn: &DBConn, child_id_bhh: &StacksBlockId) -> Option { } } -impl SortitionDBRef for SortitionDBConn<'_> { +impl SortitionDBRef for SortitionHandleConn<'_> { fn get_pox_start_cycle_info( &self, sortition_id: &SortitionId, parent_stacks_block_burn_ht: u64, cycle_index: u64, ) -> Result, ChainstateError> { - let mut handle = self.as_handle(sortition_id); + let readonly_marf = self.index.reopen_readonly()?; + let mut context = self.context.clone(); + context.chain_tip = sortition_id.clone(); + let mut handle = SortitionHandleConn::new(&readonly_marf, context); + get_pox_start_cycle_info(&mut handle, parent_stacks_block_burn_ht, cycle_index) } @@ -448,6 +828,14 @@ impl SortitionDBRef for SortitionDBConn<'_> { } impl BurnStateDB for SortitionHandleTx<'_> { + fn get_tip_burn_block_height(&self) -> Option { + self.get_burn_block_height(&self.context.chain_tip) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(self.context.chain_tip.clone()) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.tx(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), @@ -569,7 +957,23 @@ impl BurnStateDB for SortitionHandleTx<'_> { } } -impl BurnStateDB for SortitionDBConn<'_> { +impl BurnStateDB for SortitionHandleConn<'_> { + fn get_tip_burn_block_height(&self) -> Option { + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; + tip.block_height.try_into().ok() + } + + fn get_tip_sortition_id(&self) -> Option { + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; + Some(tip.sortition_id) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.conn(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), @@ -582,8 +986,6 @@ impl BurnStateDB for SortitionDBConn<'_> { height: u32, sortition_id: &SortitionId, ) -> Option { - let db_handle = SortitionHandleConn::open_reader(self, &sortition_id).ok()?; - let current_height = match self.get_burn_block_height(sortition_id) { None => { return None; @@ -595,7 +997,7 @@ impl BurnStateDB for SortitionDBConn<'_> { return None; } - match db_handle.get_block_snapshot_by_height(height as u64) { + match self.get_block_snapshot_by_height(height as u64) { Ok(Some(x)) => Some(x.burn_header_hash), _ => return None, } @@ -770,4 +1172,37 @@ impl ClarityBackingStore for MemoryBackingStore { } Ok(()) } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + fn insert_metadata( + &mut self, + contract: 
&QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + sqlite_insert_metadata(self, contract, key, value) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 321d4939a01..4fe887f2c38 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -258,7 +258,7 @@ fn epoch_21_test_all(use_mainnet: bool, version: ClarityVersion) { let baseline = test_tracked_costs("1", StacksEpochId::Epoch21, version, 0, &mut instance); for (ix, f) in NativeFunctions::ALL.iter().enumerate() { - if version < f.get_version() { + if version < f.get_min_version() || f.get_max_version().map_or(false, |max| version > max) { continue; } @@ -295,7 +295,7 @@ fn epoch_205_test_all(use_mainnet: bool) { ); for (ix, f) in NativeFunctions::ALL.iter().enumerate() { - if f.get_version() == ClarityVersion::Clarity1 { + if f.get_min_version() == ClarityVersion::Clarity1 { let test = get_simple_test(f); let cost = test_tracked_costs( test, diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index b9916dac11e..c7de36aa1c0 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity::types::StacksEpochId; use clarity::vm::ast::errors::ParseErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::Error as ClarityError; @@ -30,7 +31,7 @@ use clarity::vm::tests::{ use clarity::vm::types::SequenceData::Buffer; use clarity::vm::types::{ BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, - StandardPrincipalData, TypeSignature, Value, + StandardPrincipalData, TupleData, TypeSignature, Value, }; use clarity::vm::Value::Sequence; use clarity::vm::{ast, execute as vm_execute, ClarityVersion}; @@ -168,7 +169,7 @@ fn test_get_burn_block_info_eval() { // burnchain is 100 blocks ahead of stacks chain in this sim assert_eq!( Value::Optional(OptionalData { data: None }), - tx.eval_read_only(&contract_identifier, "(test-func u103)") + tx.eval_read_only(&contract_identifier, "(test-func u203)") .unwrap() ); }); @@ -416,7 +417,7 @@ fn trait_invocation_cross_epoch() { let sender = StacksAddress::burn_address(false).into(); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); let clarity_version = ClarityVersion::default_for_epoch(epoch); @@ -425,7 +426,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &use_contract_id, use_contract, clarity_version).unwrap(); }); // Advance another block so we get to Stacks 2.1. 
This is the last block in 2.05 - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block(|_| {}); // now in Stacks 2.1 sim.execute_next_block_as_conn(|conn| { @@ -435,7 +436,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &invoke_contract_id, invoke_contract, clarity_version).unwrap(); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -452,7 +453,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); // now in Stacks 2.2 sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); @@ -476,7 +477,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -500,7 +501,7 @@ fn trait_invocation_cross_epoch() { }); // should now be in Stacks 2.3, so the invocation should work again! - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -517,7 +518,7 @@ fn trait_invocation_cross_epoch() { }); }); - info!("Sim height = {}", sim.height); + info!("Sim height = {}", sim.block_height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -882,3 +883,880 @@ fn trait_with_trait_invocation_cross_epoch() { }); }); } + +#[test] +fn test_block_heights() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_identifier1 = QualifiedContractIdentifier::local("test-contract-1").unwrap(); + let contract_identifier2 = QualifiedContractIdentifier::local("test-contract-2").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + // This version uses the Clarity 1 / 2 keywords + let contract_clarity1 = + "(define-private (test-func) { burn-block-height: burn-block-height, block-height: block-height })"; + // This version uses the Clarity 3 keywords + let contract_clarity3 = + "(define-private (test-func) { burn-block-height: burn-block-height, stacks-block-height: stacks-block-height, tenure-height: tenure-height })"; + + // Check both contracts in Clarity 1, publish the Clarity 1 contract + conn.as_transaction(|clarity_db| { + // analyze the contracts as Clarity 1 + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity1, + &contract_clarity1, + ASTRules::PrecheckSize, + ).unwrap(); + + let res = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity1, + &contract_clarity3, + ASTRules::PrecheckSize, + ); + if let Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "stacks-block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: 
{:?}", &res); + } + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity1, + &ast, + contract_clarity1, + None, + |_, _| false, + ).unwrap(); + + // analyze the contracts as Clarity 2 + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity2, + &contract_clarity1, + ASTRules::PrecheckSize, + ).unwrap(); + + let res = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &contract_clarity3, + ASTRules::PrecheckSize, + ); + if let Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "stacks-block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: {:?}", &res); + } + + // analyze the contracts as Clarity 3 + let res = clarity_db.analyze_smart_contract( + &contract_identifier1, + ClarityVersion::Clarity3, + &contract_clarity1, + ASTRules::PrecheckSize, + ); + if let Err(ClarityError::Analysis(check_error)) = res { + if let CheckErrors::UndefinedVariable(var_name) = check_error.err { + assert_eq!(var_name, "block-height"); + } else { + panic!("Bad analysis error: {:?}", &check_error); + } + } else { + panic!("Bad analysis result: {:?}", &res); + } + + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity3, + &contract_clarity3, + ASTRules::PrecheckSize, + ).unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity3, + &ast, + contract_clarity3, + None, + |_, _| false, + ).unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple(TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height + 1)), + ("block-height".into(), Value::UInt(tenure_height + 1)) + ]).unwrap()), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple(TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height + 1)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(tenure_height + 1)) + ]).unwrap()), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block and validate the results + let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("block-height".into(), Value::UInt(tenure_height + 1)), + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(tenure_height + 1)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block with no new tenure and validate the results + let 
block_height = sim.block_height as u128;
+    let burn_block_height = sim.burn_block_height() as u128;
+    let tenure_height = sim.tenure_height as u128;
+    sim.execute_next_block_as_conn_with_tenure(false, |conn| {
+        let mut tx = conn.start_transaction_processing();
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    ("burn-block-height".into(), Value::UInt(burn_block_height)),
+                    ("block-height".into(), Value::UInt(tenure_height))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier1, "(test-func)")
+                .unwrap()
+        );
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    ("burn-block-height".into(), Value::UInt(burn_block_height)),
+                    ("stacks-block-height".into(), Value::UInt(block_height + 1)),
+                    ("tenure-height".into(), Value::UInt(tenure_height))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier2, "(test-func)")
+                .unwrap()
+        );
+    });
+
+    // Call the contracts in the next block with no new tenure and validate the results
+    let block_height = sim.block_height as u128;
+    let burn_block_height = sim.burn_block_height() as u128;
+    let tenure_height = sim.tenure_height as u128;
+    sim.execute_next_block_as_conn_with_tenure(false, |conn| {
+        let mut tx = conn.start_transaction_processing();
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    ("burn-block-height".into(), Value::UInt(burn_block_height)),
+                    ("block-height".into(), Value::UInt(tenure_height))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier1, "(test-func)")
+                .unwrap()
+        );
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    ("burn-block-height".into(), Value::UInt(burn_block_height)),
+                    ("stacks-block-height".into(), Value::UInt(block_height + 1)),
+                    ("tenure-height".into(), Value::UInt(tenure_height))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier2, "(test-func)")
+                .unwrap()
+        );
+    });
+
+    // Call the contracts in the next block with a new tenure and validate the results
+    let block_height = sim.block_height as u128;
+    let burn_block_height = sim.burn_block_height() as u128;
+    let tenure_height = sim.tenure_height as u128;
+    sim.execute_next_block_as_conn(|conn| {
+        let mut tx = conn.start_transaction_processing();
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    (
+                        "burn-block-height".into(),
+                        Value::UInt(burn_block_height + 1)
+                    ),
+                    ("block-height".into(), Value::UInt(tenure_height + 1))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier1, "(test-func)")
+                .unwrap()
+        );
+        assert_eq!(
+            Value::Tuple(
+                TupleData::from_data(vec![
+                    (
+                        "burn-block-height".into(),
+                        Value::UInt(burn_block_height + 1)
+                    ),
+                    ("stacks-block-height".into(), Value::UInt(block_height + 1)),
+                    ("tenure-height".into(), Value::UInt(tenure_height + 1))
+                ])
+                .unwrap()
+            ),
+            tx.eval_read_only(&contract_identifier2, "(test-func)")
+                .unwrap()
+        );
+    });
+}
+
+/// Test calling into a Clarity 1 or Clarity 2 contract which binds the
+/// variable names `stacks-block-height` and `tenure-height`, from a Clarity 3
+/// contract.
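+///
+/// In Clarity 1 and Clarity 2 these names are ordinary identifiers, so a
+/// definition such as the one used below,
+/// `(define-read-only (get-height (stacks-block-height int) (tenure-height bool)) ...)`,
+/// is legal and remains callable from Clarity 3, where both names are
+/// reserved keywords.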
+#[test] +fn test_block_heights_across_versions() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap(); + let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap(); + let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap(); + + let contract_e2c1_2 = r#" + (define-read-only (get-height (stacks-block-height int) (tenure-height bool)) + (if tenure-height + stacks-block-height + (+ stacks-block-height 1) + ) + ) + "#; + let contract_e3c3 = format!( + r#" + (define-read-only (call-e2 (version int)) + (if (is-eq version 1) + (contract-call? '{contract_id_e2c1} get-height 123 false) + (contract-call? '{contract_id_e2c2} get-height 456 true) + ) + ) + "# + ); + + sim.execute_next_block(|_env| {}); + + // Deploy the Clarity 1 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 1 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity_db + .save_analysis(&contract_id_e2c1, &analysis) + .unwrap(); + + // Publish the Clarity 1 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c1, + ClarityVersion::Clarity1, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Deploy the Clarity 2 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 2 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &contract_e2c1_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + clarity_db + .save_analysis(&contract_id_e2c2, &analysis) + .unwrap(); + + // Publish the Clarity 2 contract + clarity_db + .initialize_smart_contract( + &contract_id_e2c2, + ClarityVersion::Clarity2, + &ast, + contract_e2c1_2, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Advance to epoch 3 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + // Deploy the Clarity 3 contract in the next block + sim.execute_next_block_as_conn(|conn| { + conn.as_transaction(|clarity_db| { + // Analyze the Clarity 3 contract + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &contract_e3c3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the Clarity 3 contract + clarity_db + .initialize_smart_contract( + &contract_id_e3c3, + ClarityVersion::Clarity3, + &ast, + &contract_e3c3, + None, + |_, _| false, + ) + .unwrap(); + }); + }); + + // Call the Clarity 3 contract and validate the results + sim.execute_next_block_as_conn(|conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Int(124), + tx.eval_read_only(&contract_id_e3c3, "(call-e2 1)").unwrap() + ); + assert_eq!( + Value::Int(456), + tx.eval_read_only(&contract_id_e3c3, "(call-e2 2)").unwrap() + ); + }); +} + +/// Test passing a Clarity 3 contract using `stacks-block-height` and +/// `tenure-height` as a trait into a Clarity 1 and Clarity 2 contract. 
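+///
+/// The trait implementation runs under its own Clarity version, so the
+/// Clarity 3 body `(ok (+ stacks-block-height tenure-height))` used below
+/// evaluates both keywords even though the callers are Clarity 1 and 2; the
+/// test pins their sum at `u20` for the simulated chain position.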
+#[test]
+fn test_block_heights_across_versions_traits_3_from_2() {
+    let mut sim = ClarityTestSim::new();
+    sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7];
+
+    let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap();
+    let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap();
+    let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap();
+
+    let contract_e2c1_2 = r#"
+        (define-trait getter ((get-int () (response uint uint))))
+        (define-public (get-it (get-trait <getter>))
+            (contract-call? get-trait get-int)
+        )
+    "#;
+    let contract_e3c3 = format!(
+        r#"
+        (define-public (get-int)
+            (ok (+ stacks-block-height tenure-height))
+        )
+    "#
+    );
+
+    sim.execute_next_block(|_env| {});
+
+    // Deploy the Clarity 1 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 1 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e2c1,
+                    ClarityVersion::Clarity1,
+                    &contract_e2c1_2,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 1 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e2c1,
+                    ClarityVersion::Clarity1,
+                    &ast,
+                    contract_e2c1_2,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Deploy the Clarity 2 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 2 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e2c2,
+                    ClarityVersion::Clarity2,
+                    &contract_e2c1_2,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 2 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e2c2,
+                    ClarityVersion::Clarity2,
+                    &ast,
+                    contract_e2c1_2,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Advance to epoch 3
+    while sim.block_height <= 7 {
+        sim.execute_next_block(|_env| {});
+    }
+
+    // Deploy the Clarity 3 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 3 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e3c3,
+                    ClarityVersion::Clarity3,
+                    &contract_e3c3,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 3 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e3c3,
+                    ClarityVersion::Clarity3,
+                    &ast,
+                    &contract_e3c3,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Call the Clarity 1 and 2 contracts, passing the Clarity 3 contract
+    sim.execute_next_block_as_conn(|conn| {
+        let mut tx = conn.start_transaction_processing();
+        let res1 = tx
+            .run_contract_call(
+                &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(),
+                None,
+                &contract_id_e2c1,
+                "get-it",
+                &[Value::Principal(contract_id_e3c3.clone().into())],
+                |_, _| false,
+            )
+            .unwrap();
+        assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res1.0);
+
+        let res2 = tx
+            .run_contract_call(
+                &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(),
+                None,
+                &contract_id_e2c2,
+                "get-it",
+                &[Value::Principal(contract_id_e3c3.clone().into())],
+                |_, _| false,
+            )
+            .unwrap();
+        assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res2.0);
+    });
+}
+
+/// Test passing a Clarity 1 or Clarity 2 contract using `stacks-block-height`
+/// and `tenure-height` as a trait into a Clarity 3 contract.
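+///
+/// In the Clarity 1 and 2 implementations below, the names are plain contract
+/// definitions, e.g. `(define-constant stacks-block-height u555)` and a
+/// `tenure-height` data var initialized to `u222`, so `get-int` returns their
+/// sum (`u777`) rather than any chain height.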
+#[test]
+fn test_block_heights_across_versions_traits_2_from_3() {
+    let mut sim = ClarityTestSim::new();
+    sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7];
+
+    let contract_id_e2c1 = QualifiedContractIdentifier::local("epoch-2-clarity-1").unwrap();
+    let contract_id_e2c2 = QualifiedContractIdentifier::local("epoch-2-clarity-2").unwrap();
+    let contract_id_e3c3 = QualifiedContractIdentifier::local("epoch-3-clarity-3").unwrap();
+
+    let contract_e2c1_2 = r#"
+        (define-constant stacks-block-height u555)
+        (define-data-var tenure-height uint u222)
+        (define-public (get-int)
+            (ok (+ stacks-block-height (var-get tenure-height)))
+        )
+    "#;
+    let contract_e3c3 = format!(
+        r#"
+        (define-trait getter ((get-int () (response uint uint))))
+        (define-public (get-it (get-trait <getter>))
+            (contract-call? get-trait get-int)
+        )
+    "#
+    );
+
+    sim.execute_next_block(|_env| {});
+
+    // Deploy the Clarity 1 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 1 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e2c1,
+                    ClarityVersion::Clarity1,
+                    &contract_e2c1_2,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 1 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e2c1,
+                    ClarityVersion::Clarity1,
+                    &ast,
+                    contract_e2c1_2,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Deploy the Clarity 2 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 2 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e2c2,
+                    ClarityVersion::Clarity2,
+                    &contract_e2c1_2,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 2 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e2c2,
+                    ClarityVersion::Clarity2,
+                    &ast,
+                    contract_e2c1_2,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Advance to epoch 3
+    while sim.block_height <= 7 {
+        sim.execute_next_block(|_env| {});
+    }
+
+    // Deploy the Clarity 3 contract in the next block
+    sim.execute_next_block_as_conn(|conn| {
+        conn.as_transaction(|clarity_db| {
+            // Analyze the Clarity 3 contract
+            let (ast, analysis) = clarity_db
+                .analyze_smart_contract(
+                    &contract_id_e3c3,
+                    ClarityVersion::Clarity3,
+                    &contract_e3c3,
+                    ASTRules::PrecheckSize,
+                )
+                .unwrap();
+
+            // Publish the Clarity 3 contract
+            clarity_db
+                .initialize_smart_contract(
+                    &contract_id_e3c3,
+                    ClarityVersion::Clarity3,
+                    &ast,
+                    &contract_e3c3,
+                    None,
+                    |_, _| false,
+                )
+                .unwrap();
+        });
+    });
+
+    // Call the Clarity 3 contract, passing the Clarity 1 and 2 contracts
+    sim.execute_next_block_as_conn(|conn| {
+        let mut tx = conn.start_transaction_processing();
+        let res1 = tx
+            .run_contract_call(
+                &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(),
+                None,
+                &contract_id_e3c3,
+                "get-it",
+                &[Value::Principal(contract_id_e2c1.clone().into())],
+                |_, _| false,
+            )
+            .unwrap();
+        assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res1.0);
+
+        let res2 = tx
+            .run_contract_call(
+                &PrincipalData::parse("STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6").unwrap(),
+                None,
+                &contract_id_e3c3,
+                "get-it",
+                &[Value::Principal(contract_id_e2c2.clone().into())],
+                |_, _| false,
+            )
+            .unwrap();
+        assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res2.0);
+    });
+}
+
+#[test]
+fn test_block_heights_at_block() {
+    let mut sim = ClarityTestSim::new();
+    sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5,
6, 7]; + + let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + let contract =r#" + (define-private (test-tenure) (at-block (unwrap-panic (get-stacks-block-info? id-header-hash u0)) tenure-height)) + (define-private (test-stacks) (at-block (unwrap-panic (get-stacks-block-info? id-header-hash u1)) stacks-block-height)) + "#; + + conn.as_transaction(|clarity_db| { + // Analyze the contract + let (ast, analysis) = clarity_db.analyze_smart_contract( + &contract_identifier, + ClarityVersion::Clarity3, + &contract, + ASTRules::PrecheckSize, + ).unwrap(); + + // Publish the contract + clarity_db + .initialize_smart_contract( + &contract_identifier, + ClarityVersion::Clarity3, + &ast, + contract, + None, + |_, _| false, + ).unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::UInt(0), + tx.eval_read_only(&contract_identifier, "(test-tenure)") + .unwrap() + ); + assert_eq!( + Value::UInt(1), + tx.eval_read_only(&contract_identifier, "(test-stacks)") + .unwrap() + ); + }); +} + +#[test] +fn test_get_block_info_time() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_identifier2 = QualifiedContractIdentifier::local("test-contract-2").unwrap(); + let contract_identifier3 = QualifiedContractIdentifier::local("test-contract-3").unwrap(); + let contract_identifier3_3 = QualifiedContractIdentifier::local("test-contract-3-3").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 10 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + let contract2 = "(define-private (get-time) (get-block-info? time (- block-height u1)))"; + let contract3 = + "(define-private (get-time) (get-stacks-block-info? time (- stacks-block-height u1)))"; + let contract3_3 = "(define-private (get-time) (get-stacks-block-info? 
time u1))"; + + conn.as_transaction(|clarity_db| { + // Analyze the contract as Clarity 2 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &contract2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 2 + clarity_db + .initialize_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &ast, + contract2, + None, + |_, _| false, + ) + .unwrap(); + + // Analyze the contract as Clarity 3 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier3, + ClarityVersion::Clarity3, + &contract3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 3 + clarity_db + .initialize_smart_contract( + &contract_identifier3, + ClarityVersion::Clarity3, + &ast, + contract3, + None, + |_, _| false, + ) + .unwrap(); + + // Analyze the contract as Clarity 3 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier3_3, + ClarityVersion::Clarity3, + &contract3_3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 3 + clarity_db + .initialize_smart_contract( + &contract_identifier3_3, + ClarityVersion::Clarity3, + &ast, + contract3_3, + None, + |_, _| false, + ) + .unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::some(Value::UInt(11)).unwrap(), + tx.eval_read_only(&contract_identifier2, "(get-time)") + .unwrap() + ); + assert_eq!( + Value::some(Value::UInt(1713799984)).unwrap(), + tx.eval_read_only(&contract_identifier3, "(get-time)") + .unwrap() + ); + assert_eq!( + Value::some(Value::UInt(1)).unwrap(), + tx.eval_read_only(&contract_identifier3_3, "(get-time)") + .unwrap() + ); + }); +} diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index a4d9f9294a8..29c57b2e92c 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -168,6 +168,8 @@ pub fn get_simple_test(function: &NativeFunctions) -> &'static str { ToConsensusBuff => "(to-consensus-buff? u1)", FromConsensusBuff => "(from-consensus-buff? bool 0x03)", ReplaceAt => "(replace-at? list-bar u0 5)", + GetStacksBlockInfo => "(get-block-info? time u1)", + GetTenureInfo => "(get-block-info? time u1)", } } @@ -951,7 +953,7 @@ fn epoch_20_205_test_all(use_mainnet: bool, epoch: StacksEpochId) { for (ix, f) in NativeFunctions::ALL.iter().enumerate() { // Note: The 2.0 and 2.05 test assumes Clarity1. 
- if f.get_version() == ClarityVersion::Clarity1 { + if f.get_min_version() == ClarityVersion::Clarity1 { let test = get_simple_test(f); let cost = test_program_cost(test, ClarityVersion::Clarity1, &mut owned_env, ix + 1); diff --git a/stackslib/src/clarity_vm/tests/epoch_switch.rs b/stackslib/src/clarity_vm/tests/epoch_switch.rs index af305f10555..25d01c4905a 100644 --- a/stackslib/src/clarity_vm/tests/epoch_switch.rs +++ b/stackslib/src/clarity_vm/tests/epoch_switch.rs @@ -130,7 +130,7 @@ fn test_vm_epoch_switch() { // impl BurnStateDB for SortitionHandleConn { - let burndb = db.index_conn(); + let burndb = db.index_handle_at_tip(); test_burnstatedb_epoch(&burndb, start_height, end_height, 8, 12, 16); } diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 147eadc71bf..8db6b3043a4 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -20,11 +20,11 @@ use clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use clarity::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; use clarity::vm::contracts::Contract; use clarity::vm::costs::ExecutionCost; -use clarity::vm::database::ClarityDatabase; +use clarity::vm::database::{ClarityDatabase, HeadersDB}; use clarity::vm::errors::{CheckErrors, Error as InterpreterError, Error, RuntimeErrorType}; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::*; -use clarity::vm::tests::test_clarity_versions; +use clarity::vm::tests::{test_clarity_versions, BurnStateDB}; use clarity::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TypeSignature, Value, @@ -42,7 +42,7 @@ use stacks_common::util::hash::hex_bytes; use crate::chainstate::stacks::boot::{BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3}; use crate::chainstate::stacks::index::ClarityMarfTrieId; -use crate::clarity_vm::clarity::{ClarityInstance, Error as ClarityError}; +use crate::clarity_vm::clarity::{ClarityBlockConnection, ClarityInstance, Error as ClarityError}; use crate::clarity_vm::database::marf::MarfedKV; use crate::clarity_vm::database::MemoryBackingStore; use crate::util_lib::boot::boot_code_id; @@ -88,9 +88,33 @@ const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance (token-credit! 'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G u200) (token-credit! .tokens u4))"; +/// Since setup_block is not called, we need to manually increment the tenure +/// height each time a new block is made. 
+fn new_block<'a, 'b>( + clarity: &'a mut ClarityInstance, + current: &StacksBlockId, + next: &StacksBlockId, + header_db: &'b dyn HeadersDB, + burn_state_db: &'b dyn BurnStateDB, +) -> ClarityBlockConnection<'a, 'b> { + let mut block = clarity.begin_block(current, next, header_db, burn_state_db); + block.as_free_transaction(|tx_conn| { + tx_conn + .with_clarity_db(|db| { + if db.get_clarity_epoch_version().unwrap() >= StacksEpochId::Epoch30 { + let tenure_height = db.get_tenure_height().unwrap_or(0); + db.set_tenure_height(tenure_height + 1).unwrap(); + } + Ok(()) + }) + .unwrap(); + }); + block +} + #[apply(test_clarity_versions)] fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { - if epoch < StacksEpochId::Epoch2_05 { + if epoch < StacksEpochId::Epoch2_05 || version > ClarityVersion::Clarity2 { return; } let mut clarity = ClarityInstance::new(false, CHAIN_ID_TESTNET, MarfedKV::temporary()); @@ -170,7 +194,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac gb.commit_block(); { - let mut block = clarity.begin_block( + let mut block = new_block( + &mut clarity, &StacksBlockId([0xfe as u8; 32]), &StacksBlockId([0 as u8; 32]), &TEST_HEADER_DB, @@ -335,7 +360,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac for i in 0..25 { { - let block = clarity.begin_block( + let block = new_block( + &mut clarity, &test_block_headers(i), &test_block_headers(i + 1), &TEST_HEADER_DB, @@ -346,7 +372,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac } { - let mut block = clarity.begin_block( + let mut block = new_block( + &mut clarity, &test_block_headers(25), &test_block_headers(26), &TEST_HEADER_DB, @@ -676,7 +703,8 @@ pub fn rollback_log_memory_test( .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -746,7 +774,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -824,7 +853,8 @@ pub fn argument_memory_test( .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -900,7 +930,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, @@ -1018,7 +1049,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ .commit_block(); { - let mut conn = clarity_instance.begin_block( + let mut conn = new_block( + &mut clarity_instance, &StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32]), &TEST_HEADER_DB, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a1135989a40..fe75d62bd2c 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -21,7 +21,7 @@ use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; -use std::time::Instant; +use std::time::{Duration, Instant, SystemTime}; use 
std::{fs, io}; use clarity::vm::types::PrincipalData; @@ -29,14 +29,15 @@ use rand::distributions::Uniform; use rand::prelude::Distribution; use rusqlite::types::ToSql; use rusqlite::{ - Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, - NO_PARAMS, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, }; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -71,8 +72,10 @@ use crate::util_lib::db::{ use crate::{cost_estimates, monitoring}; // maximum number of confirmations a transaction can have before it's garbage-collected -pub const MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; -pub const MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; +pub static MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; +pub static MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; +pub static MEMPOOL_NAKAMOTO_MAX_TRANSACTION_AGE: Duration = + Duration::from_secs(MEMPOOL_MAX_TRANSACTION_AGE * 10 * 60); // name of table for storing the counting bloom filter pub const BLOOM_COUNTER_TABLE: &'static str = "txid_bloom_counter"; @@ -298,7 +301,7 @@ pub struct MemPoolAdmitter { enum MemPoolWalkResult { Chainstate(ConsensusHash, BlockHeaderHash, u64, u64), - NoneAtHeight(ConsensusHash, BlockHeaderHash, u64), + NoneAtCoinbaseHeight(ConsensusHash, BlockHeaderHash, u64), Done, } @@ -321,8 +324,15 @@ impl MemPoolAdmitter { tx: &StacksTransaction, tx_size: u64, ) -> Result<(), MemPoolRejection> { + let sortition_id = match SortitionDB::get_sortition_id_by_consensus( + &sortdb.conn(), + &self.cur_consensus_hash, + ) { + Ok(Some(x)) => x, + _ => return Err(MemPoolRejection::DBError(db_error::NotFoundError)), + }; chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle(&sortition_id), &self.cur_consensus_hash, &self.cur_block, tx, @@ -422,9 +432,19 @@ pub struct MemPoolTxMetadata { pub txid: Txid, pub len: u64, pub tx_fee: u64, - pub consensus_hash: ConsensusHash, - pub block_header_hash: BlockHeaderHash, - pub block_height: u64, + /// The tenure ID in which this transaction was accepted. + /// In epoch 2.x, this is the consensus hash of the sortition that chose the Stacks block + /// In Nakamoto, this is the consensus hash of the ongoing tenure. + pub tenure_consensus_hash: ConsensusHash, + /// The tenure block in which this transaction was accepted. + /// In epoch 2.x, this is the hash of the Stacks block produced in the sortition. + /// In Nakamoto, this is the hash of the tenure-start block. + pub tenure_block_header_hash: BlockHeaderHash, + /// The number of coinbases that have transpired at the time of this transaction's acceptance. 
+ /// In epoch 2.x, this is the same as the Stacks block height + /// In Nakamoto, this is the simply the number of coinbases produced in the history tipped at + /// `tenure_consensus_hash` and `tenure_block_header_hash` + pub coinbase_height: u64, pub origin_address: StacksAddress, pub origin_nonce: u64, pub sponsor_address: StacksAddress, @@ -554,10 +574,10 @@ impl FromRow for Txid { impl FromRow for MemPoolTxMetadata { fn from_row<'a>(row: &'a Row) -> Result { let txid = Txid::from_column(row, "txid")?; - let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; - let block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?; + let tenure_consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; + let tenure_block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?; let tx_fee = u64::from_column(row, "tx_fee")?; - let block_height = u64::from_column(row, "height")?; + let coinbase_height = u64::from_column(row, "height")?; let len = u64::from_column(row, "length")?; let accept_time = u64::from_column(row, "accept_time")?; let origin_address = StacksAddress::from_column(row, "origin_address")?; @@ -571,9 +591,9 @@ impl FromRow for MemPoolTxMetadata { txid, len, tx_fee, - consensus_hash, - block_header_hash, - block_height, + tenure_consensus_hash, + tenure_block_header_hash, + coinbase_height, origin_address, origin_nonce, sponsor_address, @@ -647,8 +667,13 @@ const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[r#" tx_fee INTEGER NOT NULL, length INTEGER NOT NULL, consensus_hash TEXT NOT NULL, + -- In epoch2x, this is the Stacks tip block hash at the time of this tx's arrival. + -- In Nakamoto, this is the tenure-start block hash of the ongoing tenure at the time of this tx's arrival. block_header_hash TEXT NOT NULL, - height INTEGER NOT NULL, -- stacks block height + -- This is the *coinbase height* of the chain tip above. + -- In epoch2x (when this schema was written), this also happened to be the block height; hence the name. + -- In Nakamoto, this is not a block height any longer. + height INTEGER NOT NULL, accept_time INTEGER NOT NULL, tx BLOB NOT NULL, PRIMARY KEY (txid), @@ -845,15 +870,18 @@ impl<'a> MemPoolTx<'a> { self.tx.commit().map_err(db_error::SqliteError) } - /// Remove all txids at the given height from the bloom counter. + /// Remove all txids at the given coinbase height from the bloom counter. /// Used to clear out txids that are now outside the bloom counter's depth. 
- fn prune_bloom_counter(&mut self, target_height: u64) -> Result<(), MemPoolRejection> { + fn prune_bloom_counter(&mut self, target_coinbase_height: u64) -> Result<(), MemPoolRejection> { let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(target_height)?]; + let args = params![u64_to_sql(target_coinbase_height)?]; let txids: Vec = query_rows(&self.tx, sql, args)?; let _num_txs = txids.len(); - test_debug!("Prune bloom counter from height {}", target_height); + test_debug!( + "Prune bloom counter from coinbase height {}", + target_coinbase_height + ); // keep borrow-checker happy MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { @@ -861,7 +889,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&txid]; + let args = params![txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; } // help the type inference out @@ -870,8 +898,8 @@ impl<'a> MemPoolTx<'a> { })?; test_debug!( - "Pruned bloom filter at height {}: removed {} txs", - target_height, + "Pruned bloom filter at coinbase height {}: removed {} txs", + target_coinbase_height, _num_txs ); Ok(()) @@ -879,26 +907,26 @@ impl<'a> MemPoolTx<'a> { /// Add the txid to the bloom counter in the mempool DB, optionally replacing a prior /// transaction (identified by prior_txid) if the bloom counter is full. - /// If this is the first txid at this block height, then also garbage-collect the bloom counter to remove no-longer-recent transactions. + /// If this is the first txid at this coinbase height, then also garbage-collect the bloom counter to remove no-longer-recent transactions. /// If the bloom counter is saturated -- i.e. it represents more than MAX_BLOOM_COUNTER_TXS /// transactions -- then pick another transaction to evict from the bloom filter and return its txid. /// (Note that no transactions are ever removed from the mempool; we just don't prioritize them /// in the bloom filter). fn update_bloom_counter( &mut self, - height: u64, + coinbase_height: u64, txid: &Txid, prior_txid: Option, ) -> Result, MemPoolRejection> { - // is this the first-ever txid at this height? + // is this the first-ever txid at this coinbase height? let sql = "SELECT 1 FROM mempool WHERE height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let args = params![u64_to_sql(coinbase_height)?]; let present: Option = query_row(&self.tx, sql, args)?; - if present.is_none() && height > (BLOOM_COUNTER_DEPTH as u64) { - // this is the first-ever tx at this height. + if present.is_none() && coinbase_height > (BLOOM_COUNTER_DEPTH as u64) { + // this is the first-ever tx at this coinbase height. // which means, the bloom filter window has advanced. // which means, we need to remove all the txs that are now out of the window. 
- self.prune_bloom_counter(height - (BLOOM_COUNTER_DEPTH as u64))?; + self.prune_bloom_counter(coinbase_height - (BLOOM_COUNTER_DEPTH as u64))?; } MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { @@ -915,15 +943,15 @@ impl<'a> MemPoolTx<'a> { // remove lowest-fee tx (they're paying the least, so replication is // deprioritized) let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; - let args: &[&dyn ToSql] = &[&u64_to_sql( - height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), + let args = params![u64_to_sql( + coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), )?]; let evict_txid: Option = query_row(&dbtx, sql, args)?; if let Some(evict_txid) = evict_txid { bloom_counter.remove_raw(dbtx, &evict_txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&evict_txid]; + let args = params![evict_txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; Some(evict_txid) @@ -953,7 +981,7 @@ impl<'a> MemPoolTx<'a> { let hashed_txid = Txid(Sha512Trunc256Sum::from_data(&randomized_buff).0); let sql = "INSERT OR REPLACE INTO randomized_txids (txid,hashed_txid) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = &[txid, &hashed_txid]; + let args = params![txid, hashed_txid]; self.execute(sql, args).map_err(db_error::SqliteError)?; @@ -961,46 +989,6 @@ impl<'a> MemPoolTx<'a> { } } -impl MemPoolTxInfo { - pub fn from_tx( - tx: StacksTransaction, - consensus_hash: ConsensusHash, - block_header_hash: BlockHeaderHash, - block_height: u64, - ) -> MemPoolTxInfo { - let txid = tx.txid(); - let mut tx_data = vec![]; - tx.consensus_serialize(&mut tx_data) - .expect("BUG: failed to serialize to vector"); - - let origin_address = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let (sponsor_address, sponsor_nonce) = - if let (Some(addr), Some(nonce)) = (tx.sponsor_address(), tx.get_sponsor_nonce()) { - (addr, nonce) - } else { - (origin_address.clone(), origin_nonce) - }; - - let metadata = MemPoolTxMetadata { - txid, - len: tx_data.len() as u64, - tx_fee: tx.get_tx_fee(), - consensus_hash, - block_header_hash, - block_height, - origin_address, - origin_nonce, - sponsor_address, - sponsor_nonce, - accept_time: get_epoch_time_secs(), - last_known_origin_nonce: None, - last_known_sponsor_nonce: None, - }; - MemPoolTxInfo { tx, metadata } - } -} - /// Used to locally cache nonces to avoid repeatedly looking them up in the nonce. 
struct NonceCache { cache: HashMap, @@ -1112,7 +1100,7 @@ fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<() let nonce_i64 = u64_to_sql(nonce)?; let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; - conn.execute(sql, rusqlite::params![&addr_str, nonce_i64])?; + conn.execute(sql, params![addr_str, nonce_i64])?; Ok(()) } @@ -1120,7 +1108,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d let addr_str = address.to_string(); let sql = "SELECT nonce FROM nonces WHERE address = ?"; - query_row(conn, sql, rusqlite::params![&addr_str]) + query_row(conn, sql, params![addr_str]) } #[cfg(test)] @@ -1262,7 +1250,7 @@ impl MemPoolDB { let version = conn .query_row( "SELECT MAX(version) FROM schema_version", - rusqlite::NO_PARAMS, + NO_PARAMS, |row| row.get(0), ) .optional()?; @@ -1479,27 +1467,27 @@ impl MemPoolDB { pub fn reset_nonce_cache(&mut self) -> Result<(), db_error> { debug!("reset nonce cache"); let sql = "DELETE FROM nonces"; - self.db.execute(sql, rusqlite::NO_PARAMS)?; + self.db.execute(sql, NO_PARAMS)?; Ok(()) } /// Find the origin addresses who have sent the highest-fee transactions fn find_origin_addresses_by_descending_fees( &self, - start_height: i64, - end_height: i64, + start_coinbase_height: i64, + end_coinbase_height: i64, min_fees: u64, offset: u32, count: u32, ) -> Result, db_error> { let sql = "SELECT DISTINCT origin_address FROM mempool WHERE height > ?1 AND height <= ?2 AND tx_fee >= ?3 ORDER BY tx_fee DESC LIMIT ?4 OFFSET ?5"; - let args: &[&dyn ToSql] = &[ - &start_height, - &end_height, - &u64_to_sql(min_fees)?, - &count, - &offset, + let args = params![ + start_coinbase_height, + end_coinbase_height, + u64_to_sql(min_fees)?, + count, + offset, ]; query_row_columns(self.conn(), sql, args, "origin_address") } @@ -1519,7 +1507,7 @@ impl MemPoolDB { let txs: Vec = query_rows( &sql_tx, "SELECT * FROM mempool as m WHERE m.fee_rate IS NULL LIMIT ?", - &[max_updates], + params![max_updates], )?; let mut updated = 0; for tx_to_estimate in txs { @@ -1544,7 +1532,7 @@ impl MemPoolDB { sql_tx.execute( "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", - rusqlite::params![fee_rate_f64, &txid], + params![fee_rate_f64, txid], )?; updated += 1; } @@ -1602,7 +1590,6 @@ impl MemPoolDB { &mut self, clarity_tx: &mut C, output_events: &mut Vec, - _tip_height: u64, settings: MemPoolWalkSettings, mut todo: F, ) -> Result @@ -1741,20 +1728,26 @@ impl MemPoolDB { ) { Ordering::Less => { debug!( - "Mempool: unexecutable: drop tx {}:{} ({})", - candidate.origin_address, - candidate.origin_nonce, - candidate.fee_rate.unwrap_or_default() + "Mempool: unexecutable: drop tx"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, ); // This transaction cannot execute in this pass, just drop it continue; } Ordering::Greater => { debug!( - "Mempool: nonces too high, cached for later {}:{} ({})", - candidate.origin_address, - candidate.origin_nonce, - candidate.fee_rate.unwrap_or_default() + "Mempool: nonces too high, cached for later"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, ); // This transaction could become runnable in this pass, save it for later candidate_cache.push(candidate); @@ -1912,7 +1905,7 @@ impl MemPoolDB { debug!( "Mempool iteration finished"; - "considered_txs" => total_considered, + "considered_txs" => u128::from(total_considered), "elapsed_ms" => start_time.elapsed().as_millis() ); Ok(total_considered) @@ -1932,20 +1925,12 @@ impl MemPoolDB { } pub fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result { - query_row( - conn, - "SELECT 1 FROM mempool WHERE txid = ?1", - &[txid as &dyn ToSql], - ) - .and_then(|row_opt: Option| Ok(row_opt.is_some())) + query_row(conn, "SELECT 1 FROM mempool WHERE txid = ?1", params![txid]) + .and_then(|row_opt: Option| Ok(row_opt.is_some())) } pub fn get_tx(conn: &DBConn, txid: &Txid) -> Result, db_error> { - query_row( - conn, - "SELECT * FROM mempool WHERE txid = ?1", - &[txid as &dyn ToSql], - ) + query_row(conn, "SELECT * FROM mempool WHERE txid = ?1", params![txid]) } /// Get all transactions across all tips @@ -1964,33 +1949,13 @@ impl MemPoolDB { block_header_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT * FROM mempool WHERE consensus_hash = ?1 AND block_header_hash = ?2"; - let args: &[&dyn ToSql] = &[consensus_hash, block_header_hash]; + let args = params![consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows.len()) } - /// Get all transactions at a particular timestamp on a given chain tip. - /// Order them by origin nonce. 
- pub fn get_txs_at( - conn: &DBConn, - consensus_hash: &ConsensusHash, - block_header_hash: &BlockHeaderHash, - timestamp: u64, - ) -> Result, db_error> { - let sql = "SELECT * FROM mempool WHERE accept_time = ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY origin_nonce ASC"; - let args: &[&dyn ToSql] = &[&u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; - let rows = query_rows::(conn, &sql, args)?; - Ok(rows) - } - - /// Given a chain tip, find the highest block-height from _before_ this tip - pub fn get_previous_block_height(conn: &DBConn, height: u64) -> Result, db_error> { - let sql = "SELECT height FROM mempool WHERE height < ?1 ORDER BY height DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; - query_row(conn, sql, args) - } - /// Get a number of transactions after a given timestamp on a given chain tip. + #[cfg(test)] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -1999,11 +1964,11 @@ impl MemPoolDB { count: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time >= ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY tx_fee DESC LIMIT ?4"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(timestamp)?, + let args = params![ + u64_to_sql(timestamp)?, consensus_hash, block_header_hash, - &u64_to_sql(count)?, + u64_to_sql(count)?, ]; let rows = query_rows::(conn, &sql, args)?; Ok(rows) @@ -2019,7 +1984,7 @@ impl MemPoolDB { nonce: u64, ) -> Result, db_error> { let sql = format!( - "SELECT + "SELECT txid, origin_address, origin_nonce, @@ -2036,10 +2001,13 @@ impl MemPoolDB { FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", if is_origin { "origin" } else { "sponsor" } ); - let args: &[&dyn ToSql] = &[&addr.to_string(), &u64_to_sql(nonce)?]; + let args = params![addr.to_string(), u64_to_sql(nonce)?]; query_row(conn, &sql, args) } + /// Are the given fully-qualified blocks, identified by their (consensus-hash, block-header-hash) pairs, in the same fork? + /// That is, is one block an ancestor of another? + /// TODO: Nakamoto-ize fn are_blocks_in_same_fork( chainstate: &mut StacksChainState, first_consensus_hash: &ConsensusHash, @@ -2054,9 +2022,7 @@ impl MemPoolDB { return Ok(true); } - let headers_conn = &chainstate - .index_conn() - .map_err(|_e| db_error::Other("ChainstateError".to_string()))?; + let headers_conn = &chainstate.index_conn(); let height_of_first_with_second_tip = headers_conn.get_ancestor_block_height(&second_block, &first_block)?; let height_of_second_with_first_tip = @@ -2074,17 +2040,33 @@ impl MemPoolDB { /// Add a transaction to the mempool. If it already exists, then replace it if the given fee /// is higher than the one that's already there. /// Carry out the mempool admission test before adding. + /// + /// `tip_consensus_hash`, `tip_block_header_hash`, and `coinbase_height` describe the fork that + /// was canonical when this transaction is added. While `coinbase_height` would be derived + /// from these first two fields, it is supplied independently to facilitate testing. + /// + /// If this is called in the Nakamoto epoch -- i.e. if `tip_consensus_hash` is in the Nakamoto + /// epoch -- then these tip hashes will be resolved to the tenure-start hashes first. This is + /// because in Nakamoto, we index transactions by tenure-start blocks since they directly + /// correspond to epoch 2.x Stacks blocks (meaning, the semantics of mempool sync are preserved + /// across epoch 2.x and Nakamoto as long as we treat transactions this way). 
In both epochs, + /// transactions arrive during a miner's tenure, not during a particular block's status as + /// the canonical chain tip. + /// + /// The tenure resolution behavior can be short-circuited with `resolve_tenure = false`. + /// However, this is only used in testing. + /// /// Don't call directly; use submit(). - /// This is `pub` only for testing. - pub fn try_add_tx( + pub(crate) fn try_add_tx( tx: &mut MemPoolTx, chainstate: &mut StacksChainState, - consensus_hash: &ConsensusHash, - block_header_hash: &BlockHeaderHash, + tip_consensus_hash: &ConsensusHash, + tip_block_header_hash: &BlockHeaderHash, + resolve_tenure: bool, txid: Txid, tx_bytes: Vec, tx_fee: u64, - height: u64, + coinbase_height: u64, origin_address: &StacksAddress, origin_nonce: u64, sponsor_address: &StacksAddress, @@ -2093,6 +2075,32 @@ impl MemPoolDB { ) -> Result<(), MemPoolRejection> { let length = tx_bytes.len() as u64; + // this transaction is said to arrive during this _tenure_, not during this _block_. + // In epoch 2.x, these are the same as `tip_consensus_hash` and `tip_block_header_hash`. + // In Nakamoto, they may be different. + // + // The only exception to this rule is if `tip_consensus_hash` and `tip_block_header_hash` + // are `FIRST_BURNCHAIN_CONSENSUS_HASH` and `FIRST_STACKS_BLOCK_HASH` -- in this case, + // there's no need to find the tenure-start header + let (consensus_hash, block_header_hash) = if resolve_tenure { + let tenure_start_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &StacksBlockId::new(tip_consensus_hash, tip_block_header_hash), + tip_consensus_hash, + ) + .map_err(|e| MemPoolRejection::FailedToValidate(e))? + .ok_or(MemPoolRejection::NoSuchChainTip( + tip_consensus_hash.clone(), + tip_block_header_hash.clone(), + ))?; + + let consensus_hash = tenure_start_header.consensus_hash; + let block_header_hash = tenure_start_header.anchored_header.block_hash(); + (consensus_hash, block_header_hash) + } else { + (tip_consensus_hash.clone(), tip_block_header_hash.clone()) + }; + // do we already have txs with either the same origin nonce or sponsor nonce ? let prior_tx = { match MemPoolDB::get_tx_metadata_by_address(tx, true, origin_address, origin_nonce)? { @@ -2120,10 +2128,10 @@ impl MemPoolDB { true } else if !MemPoolDB::are_blocks_in_same_fork( chainstate, - &prior_tx.consensus_hash, - &prior_tx.block_header_hash, - consensus_hash, - block_header_hash, + &prior_tx.tenure_consensus_hash, + &prior_tx.tenure_block_header_hash, + &consensus_hash, + &block_header_hash, )? { // is this a replace-across-fork ? 
debug!( @@ -2154,7 +2162,11 @@ impl MemPoolDB { return Err(MemPoolRejection::ConflictingNonceInMempool); } - tx.update_bloom_counter(height, &txid, prior_tx.as_ref().map(|tx| tx.txid.clone()))?; + tx.update_bloom_counter( + coinbase_height, + &txid, + prior_tx.as_ref().map(|tx| tx.txid.clone()), + )?; let sql = "INSERT OR REPLACE INTO mempool ( txid, @@ -2171,19 +2183,19 @@ impl MemPoolDB { tx) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; - let args: &[&dyn ToSql] = &[ - &txid, - &origin_address.to_string(), - &u64_to_sql(origin_nonce)?, - &sponsor_address.to_string(), - &u64_to_sql(sponsor_nonce)?, - &u64_to_sql(tx_fee)?, - &u64_to_sql(length)?, + let args = params![ + txid, + origin_address.to_string(), + u64_to_sql(origin_nonce)?, + sponsor_address.to_string(), + u64_to_sql(sponsor_nonce)?, + u64_to_sql(tx_fee)?, + u64_to_sql(length)?, consensus_hash, block_header_hash, - &u64_to_sql(height)?, - &u64_to_sql(get_epoch_time_secs())?, - &tx_bytes, + u64_to_sql(coinbase_height)?, + u64_to_sql(get_epoch_time_secs())?, + tx_bytes, ]; tx.execute(sql, args) @@ -2199,62 +2211,86 @@ impl MemPoolDB { Ok(()) } - /// Garbage-collect the mempool. Remove transactions that have a given number of - /// confirmations. + /// Garbage-collect the mempool according to the behavior specified in `behavior`. pub fn garbage_collect( - tx: &mut MemPoolTx, - min_height: u64, + &mut self, + chain_height: u64, + behavior: &MempoolCollectionBehavior, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?]; + let tx = self.tx_begin()?; + match behavior { + MempoolCollectionBehavior::ByStacksHeight => { + // NOTE: this is the epoch2x behavior, so `chain_height` is 1-to-1 with coinbase + // height. This will not be true in Nakamoto! + let Some(min_height) = chain_height.checked_sub(MEMPOOL_MAX_TRANSACTION_AGE) else { + return Ok(()); + }; + Self::garbage_collect_by_coinbase_height(&tx, min_height, event_observer)?; + } + MempoolCollectionBehavior::ByReceiveTime => { + Self::garbage_collect_by_time( + &tx, + &MEMPOOL_NAKAMOTO_MAX_TRANSACTION_AGE, + event_observer, + )?; + } + }; + tx.commit() + } + /// Garbage-collect the mempool. Remove transactions that were accepted more than `age` ago. + /// The granularity of this check is in seconds. + pub fn garbage_collect_by_time( + tx: &MemPoolTx, + age: &Duration, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(), db_error> { + let threshold_time = get_epoch_time_secs().saturating_sub(age.as_secs()); + let args = params![u64_to_sql(threshold_time)?]; if let Some(event_observer) = event_observer { - let sql = "SELECT txid FROM mempool WHERE height < ?1"; + let sql = "SELECT txid FROM mempool WHERE accept_time < ?1"; let txids = query_rows(tx, sql, args)?; event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT); } - let sql = "DELETE FROM mempool WHERE height < ?1"; + let sql = "DELETE FROM mempool WHERE accept_time < ?1"; tx.execute(sql, args)?; increment_stx_mempool_gc(); Ok(()) } - #[cfg(test)] - pub fn clear_before_height(&mut self, min_height: u64) -> Result<(), db_error> { - let mut tx = self.tx_begin()?; - MemPoolDB::garbage_collect(&mut tx, min_height, None)?; - tx.commit()?; + /// Garbage-collect the mempool. Remove transactions that were received `min_coinbase_height` + /// blocks ago. 
+ pub fn garbage_collect_by_coinbase_height( + tx: &MemPoolTx, + min_coinbase_height: u64, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(), db_error> { + let args = params![u64_to_sql(min_coinbase_height)?]; + + if let Some(event_observer) = event_observer { + let sql = "SELECT txid FROM mempool WHERE height < ?1"; + let txids = query_rows(tx, sql, args)?; + event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT); + } + + let sql = "DELETE FROM mempool WHERE height < ?1"; + + tx.execute(sql, args)?; + increment_stx_mempool_gc(); Ok(()) } - /// Scan the chain tip for all available transactions (but do not remove them!) - pub fn poll( + #[cfg(test)] + pub fn clear_before_coinbase_height( &mut self, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> Vec { - test_debug!("Mempool poll at {}/{}", consensus_hash, block_hash); - MemPoolDB::get_txs_after( - &self.db, - consensus_hash, - block_hash, - 0, - (i64::MAX - 1) as u64, - ) - .unwrap_or(vec![]) - .into_iter() - .map(|tx_info| { - test_debug!( - "Mempool poll {} at {}/{}", - &tx_info.tx.txid(), - consensus_hash, - block_hash - ); - tx_info.tx - }) - .collect() + min_coinbase_height: u64, + ) -> Result<(), db_error> { + let tx = self.tx_begin()?; + MemPoolDB::garbage_collect_by_coinbase_height(&tx, min_coinbase_height, None)?; + tx.commit() } /// Submit a transaction to the mempool at a particular chain tip. @@ -2277,7 +2313,8 @@ impl MemPoolDB { ); let block_id = StacksBlockId::new(consensus_hash, block_hash); - let height = match NakamotoChainState::get_block_header(chainstate.db(), &block_id) { + let coinbase_height = match NakamotoChainState::get_block_header(chainstate.db(), &block_id) + { Ok(Some(header)) => header.stacks_block_height, Ok(None) => { if *consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { @@ -2327,10 +2364,11 @@ impl MemPoolDB { chainstate, &consensus_hash, &block_hash, + true, txid.clone(), tx_data, tx_fee, - height, + coinbase_height, &origin_address, origin_nonce, &sponsor_address, @@ -2341,7 +2379,7 @@ impl MemPoolDB { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![fee_rate_estimate, &txid], + params![fee_rate_estimate, txid], ) .map_err(db_error::from)?; @@ -2352,7 +2390,20 @@ impl MemPoolDB { Ok(()) } - /// One-shot submit + /// One-shot transaction submit. + /// + /// Transactions are indexed relative to a chain tip, identified by `consensus_hash` and + /// `block_hash`. These fields have slightly different interpretations depending on what epoch + /// we're in: + /// * In epoch 2.x, these are the Stacks chain tip. + /// * In Nakamoto, these will be resolved to the tenure-start block of the tenure in which this + /// Stacks block lies. The reason for this is because of how the mempool performs + /// garbage collection in its DB and bloom filter -- the latter of which is used for mempool + /// sync. + /// + /// No action is required by te caller to handle this discrepancy; the caller should just submit + /// the canonical Stacks tip. If the current epoch is a Nakamoto epoch, it will be resolved to + /// the tenure-start block internally. pub fn submit( &mut self, chainstate: &mut StacksChainState, @@ -2437,8 +2488,7 @@ impl MemPoolDB { } /// Directly submit to the mempool, and don't do any admissions checks. - /// This method is only used during testing, but because it is used by the - /// integration tests, it cannot be marked #[cfg(test)]. 
+ #[cfg(any(test, feature = "testing"))] pub fn submit_raw( &mut self, chainstate: &mut StacksChainState, @@ -2505,7 +2555,7 @@ impl MemPoolDB { ) -> Result<(), db_error> { for txid in txids { let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[&txid, &u64_to_sql(now)?]; + let args = params![txid, &u64_to_sql(now)?]; tx.execute(sql, args)?; } Ok(()) @@ -2520,7 +2570,7 @@ impl MemPoolDB { max_size: u64, ) -> Result<(), db_error> { let sql = "DELETE FROM tx_blacklist WHERE arrival_time + ?1 < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(timeout)?, &u64_to_sql(now)?]; + let args = params![u64_to_sql(timeout)?, u64_to_sql(now)?]; tx.execute(sql, args)?; // if we get too big, then drop some txs at random @@ -2531,13 +2581,10 @@ impl MemPoolDB { let txids: Vec = query_rows( tx, "SELECT txid FROM tx_blacklist ORDER BY RANDOM() LIMIT ?1", - &[&u64_to_sql(to_delete)? as &dyn ToSql], + params![u64_to_sql(to_delete)?], )?; for txid in txids.into_iter() { - tx.execute( - "DELETE FROM tx_blacklist WHERE txid = ?1", - &[&txid as &dyn ToSql], - )?; + tx.execute("DELETE FROM tx_blacklist WHERE txid = ?1", params![txid])?; } } Ok(()) @@ -2549,7 +2596,7 @@ impl MemPoolDB { txid: &Txid, ) -> Result, db_error> { let sql = "SELECT arrival_time FROM tx_blacklist WHERE txid = ?1"; - let args: &[&dyn ToSql] = &[&txid]; + let args = params![txid]; query_row(conn, sql, args) } @@ -2648,8 +2695,8 @@ impl MemPoolDB { self.bloom_counter.to_bloom_filter(&self.conn()) } - /// Find maximum height represented in the mempool - pub fn get_max_height(conn: &DBConn) -> Result, db_error> { + /// Find maximum Stacks coinbase height represented in the mempool. + pub fn get_max_coinbase_height(conn: &DBConn) -> Result, db_error> { let sql = "SELECT 1 FROM mempool WHERE height >= 0"; let count = query_rows::(conn, sql, NO_PARAMS)?.len(); if count == 0 { @@ -2663,7 +2710,7 @@ impl MemPoolDB { /// Get the transaction ID list that represents the set of transactions that are represented in /// the bloom counter. pub fn get_bloom_txids(&self) -> Result, db_error> { - let max_height = match MemPoolDB::get_max_height(&self.conn())? { + let max_height = match MemPoolDB::get_max_coinbase_height(&self.conn())? { Some(h) => h, None => { // mempool is empty @@ -2672,7 +2719,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT mempool.txid FROM mempool WHERE height > ?1 AND height <= ?2 AND NOT EXISTS (SELECT 1 FROM removed_txids WHERE txid = mempool.txid)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + let args = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_rows(&self.conn(), sql, args) } @@ -2688,10 +2735,10 @@ impl MemPoolDB { }) } - /// How many recent transactions are there -- i.e. within BLOOM_COUNTER_DEPTH block heights of + /// How many recent transactions are there -- i.e. within BLOOM_COUNTER_DEPTH coinbase heights of /// the chain tip? pub fn get_num_recent_txs(conn: &DBConn) -> Result { - let max_height = match MemPoolDB::get_max_height(conn)? { + let max_height = match MemPoolDB::get_max_coinbase_height(conn)? 
{ Some(h) => h, None => { // mempool is empty @@ -2700,7 +2747,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT COUNT(txid) FROM mempool WHERE height > ?1 AND height <= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + let args = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_int(conn, sql, args).map(|cnt| cnt as u64) } @@ -2721,14 +2768,14 @@ impl MemPoolDB { /// Get the hashed txid for a txid pub fn get_randomized_txid(&self, txid: &Txid) -> Result, db_error> { let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[txid]; + let args = params![txid]; query_row(&self.conn(), sql, args) } pub fn find_next_missing_transactions( &self, data: &MemPoolSyncData, - height: u64, + coinbase_height: u64, last_randomized_txid: &Txid, max_txs: u64, max_run: u64, @@ -2736,7 +2783,7 @@ impl MemPoolDB { Self::static_find_next_missing_transactions( self.conn(), data, - height, + coinbase_height, last_randomized_txid, max_txs, max_run, @@ -2753,7 +2800,7 @@ impl MemPoolDB { pub fn static_find_next_missing_transactions( conn: &DBConn, data: &MemPoolSyncData, - height: u64, + coinbase_height: u64, last_randomized_txid: &Txid, max_txs: u64, max_run: u64, @@ -2768,10 +2815,10 @@ impl MemPoolDB { (SELECT 1 FROM removed_txids WHERE txid = mempool.txid) \ ORDER BY randomized_txids.hashed_txid ASC LIMIT ?3"; - let args: &[&dyn ToSql] = &[ - &last_randomized_txid, - &u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, - &u64_to_sql(max_run)?, + let args = params![ + last_randomized_txid, + u64_to_sql(coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, + u64_to_sql(max_run)?, ]; let mut tags_table = HashSet::new(); diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 5c237b90caa..ade8a825899 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -40,44 +40,18 @@ pub type StacksEpoch = GenericStacksEpoch; pub const SYSTEM_FORK_SET_VERSION: [u8; 4] = [23u8, 0u8, 0u8, 0u8]; // chain id -pub use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; - -// peer version (big-endian) -// first byte == major network protocol version (currently 0x18) -// second and third bytes are unused -// fourth byte == highest epoch supported by this node -pub const PEER_VERSION_MAINNET_MAJOR: u32 = 0x18000000; -pub const PEER_VERSION_TESTNET_MAJOR: u32 = 0xfacade00; - -pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; -pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; -pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; -pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; -pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; -pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; -pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; - -// this should be updated to the latest network epoch version supported by -// this node. this will be checked by the `validate_epochs()` method. 
-pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32; - -// set the fourth byte of the peer version -pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; -pub const PEER_VERSION_TESTNET: u32 = PEER_VERSION_TESTNET_MAJOR | PEER_NETWORK_EPOCH; - -// network identifiers -pub const NETWORK_ID_MAINNET: u32 = 0x17000000; -pub const NETWORK_ID_TESTNET: u32 = 0xff000000; +pub use stacks_common::consts::{ + CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, MINING_COMMITMENT_WINDOW, NETWORK_ID_MAINNET, + NETWORK_ID_TESTNET, PEER_NETWORK_EPOCH, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, + PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, + PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, + PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, + PEER_VERSION_TESTNET_MAJOR, STACKS_EPOCH_MAX, +}; // default port pub const NETWORK_P2P_PORT: u16 = 6265; -// sliding burnchain window over which a miner's past block-commit payouts will be used to weight -// its current block-commit in a sortition -pub const MINING_COMMITMENT_WINDOW: u8 = 6; - // Number of previous burnchain blocks to search to find burnchain-hosted Stacks operations pub const BURNCHAIN_TX_SEARCH_WINDOW: u8 = 6; @@ -195,6 +169,10 @@ pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT as u32) + 1; +// The threshold of weighted votes on a block to approve it in Nakamoto. +// This is out of 10, so 7 means "70%". +pub const NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD: u64 = 7; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 @@ -1097,7 +1075,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: first_burnchain_height + 20, - end_height: STACKS_EPOCH_MAX, + end_height: first_burnchain_height + 24, block_limit: ExecutionCost { write_length: 210210, write_count: 210210, @@ -1200,7 +1178,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: first_burnchain_height + 20, - end_height: STACKS_EPOCH_MAX, + end_height: first_burnchain_height + 24, block_limit: ExecutionCost { write_length: 210210, write_count: 210210, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 02c68a99ff1..158feeeba59 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . 
use std::collections::{HashMap, HashSet}; +use std::time::Duration; use std::{cmp, io}; use clarity::vm::costs::ExecutionCost; @@ -25,12 +26,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddre use clarity::vm::{ClarityName, ContractName, Value}; use rand::prelude::*; use rand::thread_rng; +use rusqlite::params; use stacks_common::address::AddressHashMode; use stacks_common::codec::{read_next, Error as codec_error, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksWorkScore, TrieHash, VRFSeed, }; +use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, *}; use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; @@ -127,6 +130,7 @@ pub fn make_block( burn_header_height: burn_height as u32, burn_header_timestamp: 0, anchored_block_size: 1, + burn_view: None, }; c_tx.commit_block(); @@ -192,6 +196,7 @@ fn mempool_walk_over_fork() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let blocks_to_broadcast_in = [&b_1, &b_2, &b_4]; @@ -235,6 +240,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -270,7 +276,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -309,7 +314,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -347,7 +351,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 3, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -390,7 +393,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -431,7 +433,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 3, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -493,6 +494,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -546,6 +548,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -601,6 +604,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 24 transactions into the mempool, alternating whether or not they have a fee-rate. @@ -624,6 +628,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -640,7 +645,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), txid], ) .unwrap(); } else { @@ -648,7 +653,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, txid], ) .unwrap(); } @@ -666,7 +671,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -704,7 +708,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -742,7 +745,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -796,6 +798,7 @@ fn test_iterate_candidates_skipped_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -819,6 +822,7 @@ fn test_iterate_candidates_skipped_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -843,7 +847,6 @@ fn test_iterate_candidates_skipped_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -908,6 +911,7 @@ fn test_iterate_candidates_processing_error_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -931,6 +935,7 @@ fn test_iterate_candidates_processing_error_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -955,7 +960,6 @@ fn test_iterate_candidates_processing_error_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1022,6 +1026,7 @@ fn test_iterate_candidates_problematic_transaction() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); // Load 3 transactions into the mempool @@ -1045,6 +1050,7 @@ fn test_iterate_candidates_problematic_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1069,7 +1075,6 @@ fn test_iterate_candidates_problematic_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1136,6 +1141,7 @@ fn test_iterate_candidates_concurrent_write_lock() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut expected_addr_nonces = HashMap::new(); @@ -1173,6 +1179,7 @@ fn test_iterate_candidates_concurrent_write_lock() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1189,7 +1196,7 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), txid], ) .unwrap(); } else { @@ -1197,7 +1204,7 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, txid], ) .unwrap(); } @@ -1229,7 +1236,6 @@ fn test_iterate_candidates_concurrent_write_lock() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1294,6 +1300,7 @@ fn mempool_do_not_replace_tx() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let mut tx = txs.pop().unwrap(); @@ -1331,6 +1338,7 @@ fn mempool_do_not_replace_tx() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1359,6 +1367,7 @@ fn mempool_do_not_replace_tx() { &mut chainstate, &b_2.0, &b_2.1, + true, txid, tx_bytes, tx_fee, @@ -1379,8 +1388,10 @@ fn mempool_do_not_replace_tx() { assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); } -#[test] -fn mempool_db_load_store_replace_tx() { +#[rstest] +#[case(MempoolCollectionBehavior::ByStacksHeight)] +#[case(MempoolCollectionBehavior::ByReceiveTime)] +fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); let chainstate_path = chainstate_path(function_name!()); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); @@ -1390,6 +1401,7 @@ fn mempool_db_load_store_replace_tx() { 0x80000000, &TransactionAnchorMode::Any, &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), ); let num_txs = txs.len() as u64; @@ -1432,6 +1444,7 @@ fn mempool_db_load_store_replace_tx() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1457,12 +1470,15 @@ fn mempool_db_load_store_replace_tx() { assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); assert_eq!( - tx_info.metadata.block_header_hash, + tx_info.metadata.tenure_consensus_hash, + ConsensusHash([0x1; 20]) + ); + assert_eq!( + tx_info.metadata.tenure_block_header_hash, BlockHeaderHash([0x2; 32]) ); - assert_eq!(tx_info.metadata.block_height, height); + assert_eq!(tx_info.metadata.coinbase_height, height); // test replace-by-fee with a higher fee let old_txid = txid; @@ -1489,6 +1505,7 @@ fn mempool_db_load_store_replace_tx() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1525,12 +1542,15 @@ fn mempool_db_load_store_replace_tx() { assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); assert_eq!( - tx_info.metadata.block_header_hash, + tx_info.metadata.tenure_consensus_hash, + ConsensusHash([0x1; 20]) + ); + assert_eq!( + tx_info.metadata.tenure_block_header_hash, BlockHeaderHash([0x2; 32]) ); - assert_eq!(tx_info.metadata.block_height, height); + assert_eq!(tx_info.metadata.coinbase_height, height); // test replace-by-fee with a lower fee let old_txid = txid; @@ -1549,6 +1569,7 @@ fn mempool_db_load_store_replace_tx() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, // don't 
resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1606,7 +1627,17 @@ fn mempool_db_load_store_replace_tx() { eprintln!("garbage-collect"); let mut mempool_tx = mempool.tx_begin().unwrap(); - MemPoolDB::garbage_collect(&mut mempool_tx, 101, None).unwrap(); + match behavior { + MempoolCollectionBehavior::ByStacksHeight => { + MemPoolDB::garbage_collect_by_coinbase_height(&mut mempool_tx, 101, None) + } + MempoolCollectionBehavior::ByReceiveTime => { + let test_max_age = Duration::from_secs(1); + std::thread::sleep(2 * test_max_age); + MemPoolDB::garbage_collect_by_time(&mut mempool_tx, &test_max_age, None) + } + } + .unwrap(); mempool_tx.commit().unwrap(); let txs = MemPoolDB::get_txs_after( @@ -1688,6 +1719,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1737,6 +1769,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1819,6 +1852,7 @@ fn test_add_txs_bloom_filter() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1929,6 +1963,7 @@ fn test_txtags() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -2022,6 +2057,7 @@ fn test_make_mempool_sync_data() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2060,7 +2096,7 @@ fn test_make_mempool_sync_data() { let recent_txids = mempool.get_bloom_txids().unwrap(); assert!(recent_txids.len() <= MAX_BLOOM_COUNTER_TXS as usize); - let max_height = MemPoolDB::get_max_height(mempool.conn()) + let max_height = MemPoolDB::get_max_coinbase_height(mempool.conn()) .unwrap() .unwrap_or(0); eprintln!( @@ -2199,6 +2235,7 @@ fn test_find_next_missing_transactions() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2468,6 +2505,7 @@ fn test_drop_and_blacklist_txs_by_time() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2587,6 +2625,7 @@ fn test_drop_and_blacklist_txs_by_size() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2704,6 +2743,7 @@ fn test_filter_txs_by_type() { &mut chainstate, &b_2.0, &b_2.1, + true, txid.clone(), tx_bytes, tx_fee, @@ -2739,7 +2779,6 @@ fn test_filter_txs_by_type() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -2775,7 +2814,6 @@ fn test_filter_txs_by_type() { .iterate_candidates::<_, 
ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index 88ab0e9c2a2..12bd2fb9b83 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -2,10 +2,11 @@ use std::cmp; use std::cmp::Ordering; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; use rusqlite::{ - AndThenRows, Connection, Error as SqliteError, OptionalExtension, ToSql, + params, AndThenRows, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction as SqlTransaction, }; use serde_json::Value as JsonValue; @@ -62,7 +63,7 @@ impl WeightedMedianFeeRateEstimator { pub fn open(p: &Path, metric: M, window_size: u32) -> Result { let mut db = sqlite_open( p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, false, )?; @@ -89,7 +90,7 @@ impl WeightedMedianFeeRateEstimator { fn instantiate_db(tx: &SqlTransaction) -> Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) @@ -108,7 +109,7 @@ impl WeightedMedianFeeRateEstimator { let mut mids = Vec::with_capacity(window_size as usize); let mut lows = Vec::with_capacity(window_size as usize); let results = stmt - .query_and_then::<_, SqliteError, _, _>(&[window_size], |row| { + .query_and_then::<_, SqliteError, _, _>(params![window_size], |row| { let high: f64 = row.get("high")?; let middle: f64 = row.get("middle")?; let low: f64 = row.get("low")?; @@ -160,10 +161,10 @@ impl WeightedMedianFeeRateEstimator { FROM median_fee_estimator )"; tx.execute( insert_sql, - rusqlite::params![new_measure.high, new_measure.middle, new_measure.low,], + params![new_measure.high, new_measure.middle, new_measure.low,], ) .expect("SQLite failure"); - tx.execute(deletion_sql, rusqlite::params![self.window_size]) + tx.execute(deletion_sql, params![self.window_size]) .expect("SQLite failure"); let estimate = Self::get_rate_estimates_from_sql(&tx, self.window_size); diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index 1c0349e42ba..ff7911058f8 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -1,11 +1,13 @@ use std::cmp; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::{ClaritySerializable, STXBalance}; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; use rusqlite::{ - Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, + Transaction as SqlTransaction, }; use serde_json::Value as JsonValue; @@ -46,7 +48,7 @@ impl ScalarFeeRateEstimator { pub fn open(p: &Path, metric: M) -> Result { let mut db = sqlite_open( p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, false, )?; @@ -72,7 +74,7 @@ impl ScalarFeeRateEstimator { fn instantiate_db(tx: &SqlTransaction) -> 
Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) @@ -130,7 +132,7 @@ impl ScalarFeeRateEstimator { tx.execute( sql, - rusqlite::params![ + params![ SINGLETON_ROW_ID, next_estimate.high, next_estimate.middle, @@ -238,7 +240,7 @@ impl FeeEstimator for ScalarFeeRateEstimator { fn get_rate_estimates(&self) -> Result { let sql = "SELECT high, middle, low FROM scalar_fee_estimator WHERE estimate_key = ?"; self.db - .query_row(sql, &[SINGLETON_ROW_ID], |row| { + .query_row(sql, params![SINGLETON_ROW_ID], |row| { let high: f64 = row.get(0)?; let middle: f64 = row.get(1)?; let low: f64 = row.get(2)?; diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index b986d54dc75..bb1cf48f382 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -1,10 +1,12 @@ use std::cmp; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql}; use rusqlite::{ - Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqliteTransaction, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, + Transaction as SqliteTransaction, }; use serde_json::Value as JsonValue; @@ -76,9 +78,7 @@ impl std::fmt::Display for CostField { } impl FromSql for Samples { - fn column_result( - sql_value: rusqlite::types::ValueRef<'_>, - ) -> rusqlite::types::FromSqlResult { + fn column_result(sql_value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { let json_value = JsonValue::column_result(sql_value)?; let items = serde_json::from_value(json_value).map_err(|_e| { error!("Failed to parse PessimisticEstimator sample from SQL"); @@ -144,11 +144,8 @@ impl Samples { let sql = "INSERT OR REPLACE INTO pessimistic_estimator (estimate_key, current_value, samples) VALUES (?, ?, ?)"; let current_value = u64_to_sql(self.mean()).unwrap_or_else(|_| i64::MAX); - tx.execute( - sql, - rusqlite::params![identifier, current_value, self.to_json()], - ) - .expect("SQLite failure"); + tx.execute(sql, params![identifier, current_value, self.to_json()]) + .expect("SQLite failure"); } fn get_sqlite(conn: &Connection, identifier: &str) -> Samples { @@ -172,27 +169,25 @@ impl Samples { impl PessimisticEstimator { pub fn open(p: &Path, log_error: bool) -> Result { - let db = - sqlite_open(p, rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, false).or_else(|e| { - if let SqliteError::SqliteFailure(ref internal, _) = e { - if let rusqlite::ErrorCode::CannotOpen = internal.code { - let mut db = sqlite_open( - p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE - | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, - false, - )?; - let tx = tx_begin_immediate_sqlite(&mut db)?; - PessimisticEstimator::instantiate_db(&tx)?; - tx.commit()?; - Ok(db) - } else { - Err(e) - } + let db = sqlite_open(p, OpenFlags::SQLITE_OPEN_READ_WRITE, false).or_else(|e| { + if let SqliteError::SqliteFailure(ref internal, _) = e { + if let rusqlite::ErrorCode::CannotOpen = internal.code { + let mut db = sqlite_open( + p, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, + false, + )?; + let tx = tx_begin_immediate_sqlite(&mut db)?; + PessimisticEstimator::instantiate_db(&tx)?; + tx.commit()?; + Ok(db) } else { Err(e) } - })?; + } else { + Err(e) + } + })?; Ok(PessimisticEstimator { 
db, log_error }) } @@ -205,7 +200,7 @@ impl PessimisticEstimator { fn instantiate_db(tx: &SqliteTransaction) -> Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 6fd21b0676d..fe6527ff53e 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -39,6 +39,7 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE burn_header_height: 2, burn_header_timestamp: 2, anchored_block_size: 1, + burn_view: None, }, tx_receipts, matured_rewards: vec![], diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 39c50618d51..8660e0e9a74 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,7 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -33,10 +34,11 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::time::Instant; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::indexer::{ @@ -47,8 +49,12 @@ use blockstack_lib::burnchains::db::{BurnchainBlockData, BurnchainDB}; use blockstack_lib::burnchains::{ Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, }; -use blockstack_lib::chainstate::burn::db::sortdb::SortitionDB; -use blockstack_lib::chainstate::burn::ConsensusHash; +use blockstack_lib::chainstate::burn::db::sortdb::{ + get_block_commit_by_txid, SortitionDB, SortitionHandle, +}; +use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; +use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ @@ -61,7 +67,6 @@ use blockstack_lib::chainstate::stacks::{StacksBlockHeader, *}; use blockstack_lib::clarity::vm::costs::ExecutionCost; use blockstack_lib::clarity::vm::types::StacksAddressExtensions; use blockstack_lib::clarity::vm::ClarityVersion; -use blockstack_lib::clarity_cli; use blockstack_lib::clarity_cli::vm_execute; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; @@ -72,21 +77,24 @@ use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; +use blockstack_lib::{clarity_cli, util_lib}; use libstackerdb::StackerDBChunkData; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags}; +use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde_json::{json, Value}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, }; use 
stacks_common::types::net::PeerAddress; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +#[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); if argv.len() < 2 { @@ -398,7 +406,7 @@ Given a , obtain a 2100 header hash block inventory (with an empty "Usage: {} can-download-microblock Given a , obtain a 2100 header hash inventory (with an empty header cache), and then -check if the associated microblocks can be downloaded +check if the associated microblocks can be downloaded ", argv[0] ); @@ -637,7 +645,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle(&chain_tip.sortition_id), &mut mempool_db, &parent_header, chain_tip.total_burn, @@ -761,8 +769,8 @@ simulating a miner. if let Some(value) = value_opt { let conn = sqlite_open(&db_path, OpenFlags::SQLITE_OPEN_READ_ONLY, false) .expect("Failed to open DB"); - let args: &[&dyn ToSql] = &[&value.to_hex()]; - let res: Result = conn.query_row_and_then( + let args = params![&value.to_hex()]; + let res: Result = conn.query_row_and_then( "SELECT value FROM __fork_storage WHERE value_hash = ?1", args, |row| { @@ -822,18 +830,18 @@ simulating a miner. let tip = BlockHeaderHash::from_hex(&argv[3]).unwrap(); let burntip = BurnchainHeaderHash::from_hex(&argv[4]).unwrap(); - let conn = rusqlite::Connection::open(path).unwrap(); + let conn = Connection::open(path).unwrap(); let mut cur_burn = burntip.clone(); let mut cur_tip = tip.clone(); loop { println!("{}, {}", cur_burn, cur_tip); let (next_burn, next_tip) = match conn.query_row("SELECT parent_burn_header_hash, parent_anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ? and burn_header_hash = ?", - &[&cur_tip as &dyn rusqlite::types::ToSql, &cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) { + params![cur_tip, cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) { Ok(x) => x, Err(e) => { match e { - rusqlite::Error::QueryReturnedNoRows => {}, + SqliteError::QueryReturnedNoRows => {}, e => { eprintln!("SQL Error: {}", e); }, @@ -874,6 +882,7 @@ simulating a miner. eprintln!("Usage:"); eprintln!(" {n} "); eprintln!(" {n} prefix "); + eprintln!(" {n} index-range "); eprintln!(" {n} range "); eprintln!(" {n} "); process::exit(1); @@ -881,6 +890,7 @@ simulating a miner. if argv.len() < 2 { print_help_and_exit(); } + let start = Instant::now(); let stacks_path = &argv[2]; let mode = argv.get(3).map(String::as_str); let staging_blocks_db_path = format!("{stacks_path}/mainnet/chainstate/vm/index.sqlite"); @@ -890,11 +900,11 @@ simulating a miner. let query = match mode { Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE index_block_hash LIKE \"{}%\"", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", argv[4] ), Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {}", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", argv[4] ), Some("range") => { @@ -904,19 +914,27 @@ simulating a miner. 
let arg4 = argv[4] .parse::<u64>() .expect(" not a valid u64"); let arg5 = argv[5].parse::<u64>().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {start}, {blocks}") + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + } + Some("index-range") => { + let start = argv[4] + .parse::<u64>() + .expect(" not a valid u64"); + let end = argv[5].parse::<u64>().expect(" not a valid u64"); + let blocks = end.saturating_sub(start); + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") } Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks ORDER BY height DESC LIMIT {}", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", argv[4] ), Some(_) => print_help_and_exit(), // Default to ALL blocks - None => "SELECT index_block_hash FROM staging_blocks".into(), + None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), }; let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(rusqlite::NO_PARAMS).unwrap(); + let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); let mut index_block_hashes: Vec<String> = vec![]; while let Ok(Some(row)) = hashes_set.next() { @@ -931,7 +949,7 @@ simulating a miner. } replay_block(stacks_path, index_block_hash); } - println!("Finished!"); + println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); process::exit(0); } @@ -948,7 +966,7 @@ simulating a miner. byte_prefix ); let mut stmt = conn.prepare(&query).unwrap(); - let mut rows = stmt.query(rusqlite::NO_PARAMS).unwrap(); + let mut rows = stmt.query(NO_PARAMS).unwrap(); while let Ok(Some(row)) = rows.next() { let val_string: String = row.get(0).unwrap(); let clarity_value = match clarity::vm::Value::try_deserialize_hex_untyped(&val_string) { @@ -1017,6 +1035,12 @@ simulating a miner. process::exit(0); } + if argv[1] == "analyze-sortition-mev" { + analyze_sortition_mev(argv); + // should be unreachable + process::exit(1); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); @@ -1169,7 +1193,7 @@ simulating a miner. // simulate the p2p refreshing itself // update p2p's read-only view of the unconfirmed state p2p_chainstate - .refresh_unconfirmed_state(&p2p_new_sortition_db.index_conn()) + .refresh_unconfirmed_state(&p2p_new_sortition_db.index_handle_at_tip()) .expect("Failed to open unconfirmed Clarity state"); sleep_ms(100); @@ -1322,6 +1346,7 @@ simulating a miner. } } +#[cfg_attr(test, mutants::skip)] fn tip_mine() { let argv: Vec<String> = env::args().collect(); if argv.len() < 6 { @@ -1361,13 +1386,11 @@ simulating a miner. let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) .expect("Failed to open mempool db"); - { - info!("Clearing mempool"); - let mut tx = mempool_db.tx_begin().unwrap(); - let min_height = u32::MAX as u64; - MemPoolDB::garbage_collect(&mut tx, min_height, None).unwrap(); - tx.commit().unwrap(); - } + info!("Clearing mempool"); + let min_height = u32::MAX as u64; + mempool_db + .garbage_collect(min_height, &MempoolCollectionBehavior::ByStacksHeight, None) + .unwrap(); let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) .unwrap() .unwrap(); @@ -1512,7 +1535,7 @@ simulating a miner. 
let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &mut mempool_db, &parent_header, chain_tip.total_burn, @@ -1646,7 +1669,8 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { return; }; - let block = StacksChainState::extract_stacks_block(&next_staging_block).unwrap(); + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); let block_size = next_staging_block.block_data.len() as u64; let parent_block_header = match &parent_header_info.anchored_header { @@ -1731,3 +1755,224 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { } }; } + +/// Perform an analysis of the anti-MEV algorithm in epoch 3.0, vis-a-vis the status quo. +/// Results are printed to stdout. +/// Exits with 0 on success, and 1 on failure. +fn analyze_sortition_mev(argv: Vec) { + if argv.len() < 7 || (argv.len() >= 7 && argv.len() % 2 != 1) { + eprintln!( + "Usage: {} /path/to/burnchain/db /path/to/sortition/db /path/to/chainstate/db start_height end_height [advantage_miner advantage_burn ..]", + &argv[0] + ); + process::exit(1); + } + + let burnchaindb_path = argv[2].clone(); + let sortdb_path = argv[3].clone(); + let chainstate_path = argv[4].clone(); + let start_height: u64 = argv[5].parse().unwrap(); + let end_height: u64 = argv[6].parse().unwrap(); + + let mut advantages = HashMap::new(); + if argv.len() >= 7 { + let mut i = 7; + while i + 2 < argv.len() { + let advantaged_miner = argv[i].clone(); + let advantage: u64 = argv[i + 1].parse().unwrap(); + advantages.insert(advantaged_miner, advantage); + i += 2; + } + } + + let mut sortdb = + SortitionDB::open(&sortdb_path, true, PoxConstants::mainnet_default()).unwrap(); + sortdb.dryrun = true; + let burnchain = Burnchain::new(&burnchaindb_path, "bitcoin", "mainnet").unwrap(); + let burnchaindb = BurnchainDB::connect(&burnchaindb_path, &burnchain, true).unwrap(); + let (mut chainstate, _) = + StacksChainState::open(true, 0x00000001, &chainstate_path, None).unwrap(); + + let mut wins_epoch2 = BTreeMap::new(); + let mut wins_epoch3 = BTreeMap::new(); + + for height in start_height..end_height { + debug!("Get ancestor snapshots for {}", height); + let (tip_sort_id, parent_ancestor_sn, ancestor_sn) = { + let mut sort_tx = sortdb.tx_begin_at_tip(); + let tip_sort_id = sort_tx.tip(); + let ancestor_sn = sort_tx + .get_block_snapshot_by_height(height) + .unwrap() + .unwrap(); + let parent_ancestor_sn = sort_tx + .get_block_snapshot_by_height(height - 1) + .unwrap() + .unwrap(); + (tip_sort_id, parent_ancestor_sn, ancestor_sn) + }; + + let mut burn_block = + BurnchainDB::get_burnchain_block(burnchaindb.conn(), &ancestor_sn.burn_header_hash) + .unwrap(); + + debug!( + "Get reward cycle info at {}", + burn_block.header.block_height + ); + let rc_info_opt = get_reward_cycle_info( + burn_block.header.block_height, + &burn_block.header.parent_block_hash, + &tip_sort_id, + &burnchain, + &burnchaindb, + &mut chainstate, + &mut sortdb, + &OnChainRewardSetProvider::new(), + false, + ) + .unwrap(); + + let mut ops = burn_block.ops.clone(); + for op in ops.iter_mut() { + if let BlockstackOperationType::LeaderBlockCommit(op) = op { + if let Some(extra_burn) = advantages.get(&op.apparent_sender.to_string()) { + debug!( + "Miner {} gets {} extra burn fee", + &op.apparent_sender.to_string(), + extra_burn + ); + op.burn_fee += *extra_burn; + } + } + } + burn_block.ops = ops; + + debug!("Re-evaluate sortition at height {}", height); + let 
(next_sn, state_transition) = sortdb + .evaluate_sortition( + &burn_block.header, + burn_block.ops.clone(), + &burnchain, + &tip_sort_id, + rc_info_opt, + |_| (), + ) + .unwrap(); + + assert_eq!(next_sn.block_height, ancestor_sn.block_height); + assert_eq!(next_sn.burn_header_hash, ancestor_sn.burn_header_hash); + + let mut sort_tx = sortdb.tx_begin_at_tip(); + let tip_pox_id = sort_tx.get_pox_id().unwrap(); + let next_sn_nakamoto = BlockSnapshot::make_snapshot_in_epoch( + &mut sort_tx, + &burnchain, + &ancestor_sn.sortition_id, + &tip_pox_id, + &parent_ancestor_sn, + &burn_block.header, + &state_transition, + 0, + StacksEpochId::Epoch30, + ) + .unwrap(); + + assert_eq!(next_sn.block_height, next_sn_nakamoto.block_height); + assert_eq!(next_sn.burn_header_hash, next_sn_nakamoto.burn_header_hash); + + let winner_epoch2 = get_block_commit_by_txid( + &sort_tx, + &ancestor_sn.sortition_id, + &next_sn.winning_block_txid, + ) + .unwrap() + .map(|cmt| format!("{:?}", &cmt.apparent_sender.to_string())) + .unwrap_or("(null)".to_string()); + + let winner_epoch3 = get_block_commit_by_txid( + &sort_tx, + &ancestor_sn.sortition_id, + &next_sn_nakamoto.winning_block_txid, + ) + .unwrap() + .map(|cmt| format!("{:?}", &cmt.apparent_sender.to_string())) + .unwrap_or("(null)".to_string()); + + wins_epoch2.insert( + (next_sn.block_height, next_sn.burn_header_hash), + winner_epoch2, + ); + wins_epoch3.insert( + ( + next_sn_nakamoto.block_height, + next_sn_nakamoto.burn_header_hash, + ), + winner_epoch3, + ); + } + + let mut all_wins_epoch2 = BTreeMap::new(); + let mut all_wins_epoch3 = BTreeMap::new(); + + println!("Wins epoch 2"); + println!("------------"); + println!("height,burn_header_hash,winner"); + for ((height, bhh), winner) in wins_epoch2.iter() { + println!("{},{},{}", height, bhh, winner); + if let Some(cnt) = all_wins_epoch2.get_mut(winner) { + *cnt += 1; + } else { + all_wins_epoch2.insert(winner, 1); + } + } + + println!("------------"); + println!("Wins epoch 3"); + println!("------------"); + println!("height,burn_header_hash,winner"); + for ((height, bhh), winner) in wins_epoch3.iter() { + println!("{},{},{}", height, bhh, winner); + if let Some(cnt) = all_wins_epoch3.get_mut(winner) { + *cnt += 1; + } else { + all_wins_epoch3.insert(winner, 1); + } + } + + println!("---------------"); + println!("Differences"); + println!("---------------"); + println!("height,burn_header_hash,winner_epoch2,winner_epoch3"); + for ((height, bhh), winner) in wins_epoch2.iter() { + let Some(epoch3_winner) = wins_epoch3.get(&(*height, *bhh)) else { + continue; + }; + if epoch3_winner != winner { + println!("{},{},{},{}", height, bhh, winner, epoch3_winner); + } + } + + println!("---------------"); + println!("All epoch2 wins"); + println!("---------------"); + println!("miner,count"); + for (winner, count) in all_wins_epoch2.iter() { + println!("{},{}", winner, count); + } + + println!("---------------"); + println!("All epoch3 wins"); + println!("---------------"); + println!("miner,count,degradation"); + for (winner, count) in all_wins_epoch3.into_iter() { + let degradation = (count as f64) + / (all_wins_epoch2 + .get(&winner) + .map(|cnt| *cnt as f64) + .unwrap_or(0.00000000000001f64)); + println!("{},{},{}", &winner, count, degradation); + } + + process::exit(0); +} diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index fa83fe97aba..7f1aa9db26a 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -23,6 +23,7 @@ use std::{fmt, fs}; use 
clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; use rusqlite::{OpenFlags, OptionalExtension}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::uint::{Uint256, Uint512}; @@ -46,9 +47,10 @@ pub fn increment_rpc_calls_counter() { prometheus::RPC_CALL_COUNTER.inc(); } +#[allow(unused_mut)] pub fn instrument_http_request_handler( conv_http: &mut ConversationHttp, - mut req: StacksHttpRequest, + #[allow(unused_mut)] mut req: StacksHttpRequest, handler: F, ) -> Result where @@ -209,7 +211,7 @@ fn txid_tracking_db(chainstate_root_path: &str) -> Result if create_flag { conn.execute( "CREATE TABLE processed_txids (txid TEXT NOT NULL PRIMARY KEY)", - rusqlite::NO_PARAMS, + NO_PARAMS, )?; } diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index eb07206772d..150ed1ca1e6 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -234,52 +234,56 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { cost_limit.write_length = 0; cost_limit.write_count = 0; - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - let cost_track = clarity_tx - .with_clarity_db_readonly(|clarity_db| { - LimitedCostTracker::new_mid_block( - mainnet, chain_id, cost_limit, clarity_db, epoch, - ) - }) - .map_err(|_| { - ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) - })?; - - let clarity_version = clarity_tx - .with_analysis_db_readonly(|analysis_db| { - analysis_db.get_clarity_version(&contract_identifier) - }) - .map_err(|_| { - ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( - "{}", - &contract_identifier - ))) - })?; - - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - clarity_version, - sender, - sponsor, - cost_track, - |env| { - // we want to execute any function as long as no actual writes are made as - // opposed to be limited to purely calling `define-read-only` functions, - // so use `read_only = false`. This broadens the number of functions that - // can be called, and also circumvents limitations on `define-read-only` - // functions that can not use `contrac-call?`, even when calling other - // read-only functions - env.execute_contract( - &contract_identifier, - function.as_str(), - &args, - false, - ) - }, - ) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + let cost_track = clarity_tx + .with_clarity_db_readonly(|clarity_db| { + LimitedCostTracker::new_mid_block( + mainnet, chain_id, cost_limit, clarity_db, epoch, + ) + }) + .map_err(|_| { + ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) + })?; + + let clarity_version = clarity_tx + .with_analysis_db_readonly(|analysis_db| { + analysis_db.get_clarity_version(&contract_identifier) + }) + .map_err(|_| { + ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( + "{}", + &contract_identifier + ))) + })?; + + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + clarity_version, + sender, + sponsor, + cost_track, + |env| { + // we want to execute any function as long as no actual writes are made as + // opposed to be limited to purely calling `define-read-only` functions, + // so use `read_only = false`. 
This broadens the number of functions that + // can be called, and also circumvents limitations on `define-read-only` + // functions that cannot use `contract-call?`, even when calling other + // read-only functions + env.execute_contract( + &contract_identifier, + function.as_str(), + &args, + false, + ) + }, + ) + }, + ) }); // decode the response diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs new file mode 100644 index 00000000000..8bcf32ce1d9 --- /dev/null +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -0,0 +1,314 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Hash160}; +use stacks_common::util::HexError; +use {serde, serde_json}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::{prefix_hex, prefix_opt_hex}; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures/fork_info"; + +static DEPTH_LIMIT: usize = 10; + +/// Struct for information about a tenure that is used to determine whether +/// or not the tenure should have been validly forked. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct TenureForkingInfo { + /// The burnchain header hash of the block that triggered this event. + #[serde(with = "prefix_hex")] + pub burn_block_hash: BurnchainHeaderHash, + /// The burn height of the block that triggered this event. + pub burn_block_height: u64, + /// The sortition ID of the block that triggered this event. 
This incorporates + /// PoX forking information and the burn block hash to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub sortition_id: SortitionId, + /// The parent of this burn block's Sortition ID + #[serde(with = "prefix_hex")] + pub parent_sortition_id: SortitionId, + /// The consensus hash of the block that triggered this event. This incorporates + /// PoX forking information and burn op information to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub consensus_hash: ConsensusHash, + /// Boolean indicating whether or not there was a successful sortition (i.e. a winning + /// block or miner was chosen). + pub was_sortition: bool, + /// If the sortition occurred, and a block was mined during the tenure, this is the + /// tenure's first block. + #[serde(with = "prefix_opt_hex")] + pub first_block_mined: Option<StacksBlockId>, +} + +#[derive(Clone, Default)] +pub struct GetTenuresForkInfo { + pub start_sortition: Option<ConsensusHash>, + pub stop_sortition: Option<ConsensusHash>, +} + +/// Decode the HTTP request +impl HttpRequest for GetTenuresForkInfo { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^{RPC_TENURE_FORKING_INFO_PATH}/(?P<start>[0-9a-f]{{40}})/(?P<stop>[0-9a-f]{{40}})$"# + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + + let start_str = captures + .name("start") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to start_sortition group".to_string()) + })? + .as_str(); + let stop_str = captures + .name("stop") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to stop_sortition group".to_string()) + })? + .as_str(); + let start_sortition = ConsensusHash::from_hex(start_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + let stop_sortition = ConsensusHash::from_hex(stop_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + self.start_sortition = Some(start_sortition); + self.stop_sortition = Some(stop_sortition); + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_TENURE_FORKING_INFO_PATH + } +} + +impl TenureForkingInfo { + fn from_snapshot( + sn: &BlockSnapshot, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, + ) -> Result<TenureForkingInfo, ChainError> { + let first_block_mined = if !sn.sortition { + None + } else { + // is this a nakamoto sortition? + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height)?.ok_or_else( + || { + warn!( + "Failed to lookup stacks epoch for processed snapshot height {}", + sn.block_height + ); + ChainError::InvalidChainstateDB + }, + )?; + if epoch.epoch_id < StacksEpochId::Epoch30 { + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chainstate.db(), + &sn.consensus_hash, + )? 
+ .map(|header| header.index_block_hash()) + } else { + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + tip_block_id, + &sn.consensus_hash, + )? + .map(|header| header.index_block_hash()) + } + }; + Ok(TenureForkingInfo { + burn_block_hash: sn.burn_header_hash.clone(), + burn_block_height: sn.block_height, + sortition_id: sn.sortition_id.clone(), + parent_sortition_id: sn.parent_sortition_id.clone(), + consensus_hash: sn.consensus_hash.clone(), + was_sortition: sn.sortition, + first_block_mined, + }) + } +} + +impl RPCRequestHandler for GetTenuresForkInfo { + /// Reset internal state + fn restart(&mut self) { + self.start_sortition = None; + self.stop_sortition = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + let start_from = self + .stop_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end = self + .start_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end_snapshot = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &recurse_end)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let height_bound = recurse_end_snapshot.block_height; + + let mut results = vec![]; + let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + )?); + let handle = sortdb.index_handle(&cursor.sortition_id); + let mut depth = 0; + while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { + depth += 1; + if height_bound >= cursor.block_height { + return Err(ChainError::NotInSameFork); + } + cursor = handle + .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + )?); + } + + Ok(results) + }); + + let tenures = match result { + Ok(tenures) => tenures, + Err(ChainError::NotInSameFork) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new_json(serde_json::json!( + "Supplied start and end sortitions are not in the same sortition fork" + )), + ) + .try_into_contents() + .map_err(NetError::from); + } + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!( + "Could not find snapshot {:?}\n", + &self.stop_sortition + )), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!( + "Failed to load snapshots for range ({:?}, {:?}]: {:?}\n", + &self.start_sortition, &self.stop_sortition, &e + ); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::JSON, + ); + + Ok(( + resp_preamble, + HttpResponseContents::try_from_json(&tenures)?, + )) + } +} + +impl HttpResponse for GetTenuresForkInfo { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + 
let tenures_info: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(tenures_info)?) + } +} diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index 83a39f30318..7cbf0a8210f 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -146,76 +146,80 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let account_opt_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_account_balance(&account); - let burn_block_height = - clarity_db.get_current_burnchain_block_height().ok()? as u64; - let v1_unlock_height = clarity_db.get_v1_unlock_height(); - let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; - let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; - let (balance, balance_proof) = if with_proof { - clarity_db - .get_data_with_proof::(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) - } else { - clarity_db - .get_data::(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (STXBalance::zero(), None)) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_account_balance(&account); + let burn_block_height = + clarity_db.get_current_burnchain_block_height().ok()? as u64; + let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; + let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; + let (balance, balance_proof) = if with_proof { + clarity_db + .get_data_with_proof::(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) + } else { + clarity_db + .get_data::(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) + }; - let key = ClarityDatabase::make_key_for_account_nonce(&account); - let (nonce, nonce_proof) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (0, Some("".into()))) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (0, None)) - }; + let key = ClarityDatabase::make_key_for_account_nonce(&account); + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; - let unlocked = balance - .get_available_balance_at_burn_block( + let unlocked = balance + .get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + v3_unlock_height, + ) + .ok()?; + + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height, - ) - .ok()?; - - let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - 
v2_unlock_height, - v3_unlock_height, - ); + ); - let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); - let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); + let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); + let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); - Some(AccountEntryResponse { - balance, - locked, - unlock_height, - nonce, - balance_proof, - nonce_proof, + Some(AccountEntryResponse { + balance, + locked, + unlock_height, + nonce, + balance_proof, + nonce_proof, + }) }) - }) - }) + }, + ) }); let account = if let Ok(Some(account)) = account_opt_res { diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index f9b2881ac52..b08d1c68351 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -144,20 +144,24 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let contract = clarity_db.get_contract(&contract_identifier).ok()?; - - let cst = contract - .contract_context - .lookup_variable(constant_name.as_str())? - .serialize_to_hex() - .ok()?; - - let data = format!("0x{cst}"); - Some(ConstantValResponse { data }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let contract = clarity_db.get_contract(&contract_identifier).ok()?; + + let cst = contract + .contract_context + .lookup_variable(constant_name.as_str())? + .serialize_to_hex() + .ok()?; + + let data = format!("0x{cst}"); + Some(ConstantValResponse { data }) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 7fc38433e75..d98c2c66231 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -132,14 +132,18 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - clarity_tx.with_analysis_db_readonly(|db| { - db.load_contract(&contract_identifier, &epoch) - .ok()? - .map(|contract| contract.contract_interface) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + clarity_tx.with_analysis_db_readonly(|db| { + db.load_contract(&contract_identifier, &epoch) + .ok()? 
+ .map(|contract| contract.contract_interface) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index 32963f5319b..139995988ea 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -140,30 +140,34 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let source = db.get_contract_src(&contract_identifier)?; - let contract_commit_key = make_contract_hash_key(&contract_identifier); - let (contract_commit, proof) = if with_proof { - db.get_data_with_proof::(&contract_commit_key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - db.get_data::(&contract_commit_key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let publish_height = contract_commit.block_height; - Some(ContractSrcResponse { - source, - publish_height, - marf_proof: proof, + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let source = db.get_contract_src(&contract_identifier)?; + let contract_commit_key = make_contract_hash_key(&contract_identifier); + let (contract_commit, proof) = if with_proof { + db.get_data_with_proof::(&contract_commit_key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + db.get_data::(&contract_commit_key) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let publish_height = contract_commit.block_height; + Some(ContractSrcResponse { + source, + publish_height, + marf_proof: proof, + }) }) - }) - }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index f624f3ca589..f3a4acb7d36 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -154,26 +154,30 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { ); let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let data = format!("0x{}", value_hex); - Some(DataVarResponse { data, marf_proof }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None))? 
+ }; + + let data = format!("0x{}", value_hex); + Some(DataVarResponse { data, marf_proof }) + }) + }, + ) }); let data_resp = match data_opt { diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 38c802f966a..237205f63a8 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,6 +82,7 @@ pub struct RPCPeerInfoData { pub unanchored_tip: Option, pub unanchored_seq: Option, pub exit_at_block_height: Option, + pub is_fully_synced: bool, #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub node_public_key: Option, @@ -105,6 +106,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, + ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( "stacks-node", @@ -130,6 +132,7 @@ impl RPCPeerInfoData { let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); let public_key_hash = Hash160::from_node_public_key(&public_key); let stackerdb_contract_ids = network.get_local_peer().stacker_dbs.clone(); + let is_fully_synced = !ibd; RPCPeerInfoData { peer_version: network.burnchain.peer_version, @@ -140,12 +143,13 @@ impl RPCPeerInfoData { server_version, network_id: network.local_peer.network_id, parent_network_id: network.local_peer.parent_network_id, - stacks_tip_height: network.stacks_tip.2, - stacks_tip: network.stacks_tip.1.clone(), - stacks_tip_consensus_hash: network.stacks_tip.0.clone(), + stacks_tip_height: network.stacks_tip.height, + stacks_tip: network.stacks_tip.block_hash.clone(), + stacks_tip_consensus_hash: network.stacks_tip.consensus_hash.clone(), unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, exit_at_block_height: exit_at_block_height, + is_fully_synced, genesis_chainstate_hash: genesis_chainstate_hash.clone(), node_public_key: Some(public_key_buf), node_public_key_hash: Some(public_key_hash), @@ -212,6 +216,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let ibd = node.ibd; let rpc_peer_info = node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { RPCPeerInfoData::from_network( @@ -219,6 +224,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { chainstate, rpc_args.exit_at_block_height.clone(), &rpc_args.genesis_chainstate_hash, + ibd, ) }); let mut preamble = HttpResponsePreamble::ok_json(&preamble); diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index 16b1e2fd33e..3b8e07ad1ab 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -160,34 +160,38 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let analysis = db - .load_contract_analysis(&contract_identifier) - .ok() - .flatten()?; - if analysis.implemented_traits.contains(&trait_id) { - Some(GetIsTraitImplementedResponse { - is_implemented: true, - }) - } else { - let trait_defining_contract = db - .load_contract_analysis(&trait_id.contract_identifier) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + 
clarity_tx.with_clarity_db_readonly(|db| { + let analysis = db + .load_contract_analysis(&contract_identifier) .ok() .flatten()?; - let trait_definition = - trait_defining_contract.get_defined_trait(&trait_id.name)?; - let is_implemented = analysis - .check_trait_compliance( - &db.get_clarity_epoch_version().ok()?, - &trait_id, - trait_definition, - ) - .is_ok(); - Some(GetIsTraitImplementedResponse { is_implemented }) - } - }) - }) + if analysis.implemented_traits.contains(&trait_id) { + Some(GetIsTraitImplementedResponse { + is_implemented: true, + }) + } else { + let trait_defining_contract = db + .load_contract_analysis(&trait_id.contract_identifier) + .ok() + .flatten()?; + let trait_definition = + trait_defining_contract.get_defined_trait(&trait_id.name)?; + let is_implemented = analysis + .check_trait_compliance( + &db.get_clarity_epoch_version().ok()?, + &trait_id, + trait_definition, + ) + .is_ok(); + Some(GetIsTraitImplementedResponse { is_implemented }) + } + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index b5db5af0416..cb318b59960 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -183,34 +183,38 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, Some("".into())) - }) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, None) - }) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, Some("".into())) + }) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, None) + }) + }; - let data = format!("0x{}", value_hex); - MapEntryResponse { data, marf_proof } - }) - }) + let data = format!("0x{}", value_hex); + MapEntryResponse { data, marf_proof } + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 51454352a18..6707ed3ba16 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -51,9 +51,43 @@ pub struct RPCNeighbor { pub public_key_hash: Hash160, pub authenticated: bool, #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "serde_opt_vec_qci")] pub stackerdbs: Option>, } +/// Serialize and deserialize `Option>` +/// using the `to_string()` and `parse()` implementations of `QualifiedContractIdentifier`. 
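The module that follows implements this string-based encoding. As an illustrative sketch only (the `Example` struct below is hypothetical and not part of this patch), a field opts into the adapter with serde's `with` attribute:

    use clarity::vm::types::QualifiedContractIdentifier;
    use serde::{Deserialize, Serialize};

    // Hypothetical container; `serde_opt_vec_qci` refers to the module defined below.
    #[derive(Serialize, Deserialize)]
    struct Example {
        // Serialized as an optional JSON array of strings such as
        // "SP000000000000000000002Q6VF78.pox", and parsed back on deserialization.
        #[serde(with = "serde_opt_vec_qci")]
        stackerdbs: Option<Vec<QualifiedContractIdentifier>>,
    }
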
+mod serde_opt_vec_qci {
+    use clarity::vm::types::QualifiedContractIdentifier;
+    use serde::{Deserialize, Serialize};
+
+    pub fn serialize<S: serde::Serializer>(
+        opt: &Option<Vec<QualifiedContractIdentifier>>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        let serialize_as: Option<Vec<String>> = opt
+            .as_ref()
+            .map(|vec_qci| vec_qci.iter().map(ToString::to_string).collect());
+        serialize_as.serialize(serializer)
+    }
+
+    pub fn deserialize<'de, D>(de: D) -> Result<Option<Vec<QualifiedContractIdentifier>>, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let from_str: Option<Vec<String>> = Deserialize::deserialize(de)?;
+        let Some(vec_str) = from_str else {
+            return Ok(None);
+        };
+        let parse_opt: Result<Vec<_>, _> = vec_str
+            .into_iter()
+            .map(|x| QualifiedContractIdentifier::parse(&x).map_err(serde::de::Error::custom))
+            .collect();
+        let out_vec = parse_opt?;
+        Ok(Some(out_vec))
+    }
+}
+
 impl RPCNeighbor {
     pub fn from_neighbor_key_and_pubkh(
         nk: NeighborKey,
diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs
index 9e3cd906d47..81868c81f8c 100644
--- a/stackslib/src/net/api/getpoxinfo.rs
+++ b/stackslib/src/net/api/getpoxinfo.rs
@@ -190,17 +190,21 @@ impl RPCPoxInfoData {
             + 1;
 
         let data = chainstate
-            .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| {
-                clarity_tx.with_readonly_clarity_env(
-                    mainnet,
-                    chain_id,
-                    ClarityVersion::Clarity2,
-                    sender,
-                    None,
-                    cost_track,
-                    |env| env.execute_contract(&contract_identifier, function, &[], true),
-                )
-            })
+            .maybe_read_only_clarity_tx(
+                &sortdb.index_handle_at_block(chainstate, tip)?,
+                tip,
+                |clarity_tx| {
+                    clarity_tx.with_readonly_clarity_env(
+                        mainnet,
+                        chain_id,
+                        ClarityVersion::Clarity2,
+                        sender,
+                        None,
+                        cost_track,
+                        |env| env.execute_contract(&contract_identifier, function, &[], true),
+                    )
+                },
+            )
             .map_err(|_| NetError::NotFoundError)?;
 
         let res = match data {
diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs
new file mode 100644
index 00000000000..5e0557ca26b
--- /dev/null
+++ b/stackslib/src/net/api/getsortition.rs
@@ -0,0 +1,308 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
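[Editor's aside: the `serde_opt_vec_qci` helper above round-trips `Option<Vec<QualifiedContractIdentifier>>` through JSON as an optional array of strings. A minimal sketch of the same pattern, using a hypothetical `Id(String)` newtype in place of `QualifiedContractIdentifier` so it runs without the clarity crate; only `serde` and `serde_json` are assumed.]

use serde::{Deserialize, Serialize};

// Stand-in for QualifiedContractIdentifier (hypothetical).
#[derive(Clone, Debug, PartialEq)]
pub struct Id(pub String);

mod serde_opt_vec_id {
    use serde::{Deserialize, Serialize};

    // Serialize Option<Vec<Id>> as Option<Vec<String>>.
    pub fn serialize<S: serde::Serializer>(
        opt: &Option<Vec<super::Id>>,
        s: S,
    ) -> Result<S::Ok, S::Error> {
        opt.as_ref()
            .map(|v| v.iter().map(|id| id.0.clone()).collect::<Vec<String>>())
            .serialize(s)
    }

    // Deserialize the string form back into Ids.
    pub fn deserialize<'de, D: serde::Deserializer<'de>>(
        d: D,
    ) -> Result<Option<Vec<super::Id>>, D::Error> {
        let strs: Option<Vec<String>> = Deserialize::deserialize(d)?;
        Ok(strs.map(|v| v.into_iter().map(super::Id).collect()))
    }
}

#[derive(Serialize, Deserialize, Debug)]
struct Neighborish {
    #[serde(with = "serde_opt_vec_id")]
    stackerdbs: Option<Vec<Id>>,
}

fn main() {
    let n = Neighborish {
        stackerdbs: Some(vec![Id("SP000000000000000000002Q6VF78.pox".into())]),
    };
    let json = serde_json::to_string(&n).unwrap();
    // Renders as {"stackerdbs":["SP000000000000000000002Q6VF78.pox"]}
    println!("{json}");
    let back: Neighborish = serde_json::from_str(&json).unwrap();
    assert_eq!(back.stackerdbs.unwrap()[0].0, "SP000000000000000000002Q6VF78.pox");
}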
+
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::{fs, io};
+
+use regex::{Captures, Regex};
+use serde::de::Error as de_Error;
+use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::PeerHost;
+use stacks_common::util::hash::{to_hex, Hash160};
+use stacks_common::util::HexError;
+use {serde, serde_json};
+
+use crate::chainstate::burn::db::sortdb::SortitionDB;
+use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn};
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::Error as ChainError;
+use crate::net::api::getblock_v3::NakamotoBlockStream;
+use crate::net::api::{prefix_hex, prefix_opt_hex};
+use crate::net::http::{
+    parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType,
+    HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse,
+    HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion,
+};
+use crate::net::httpcore::{
+    HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest,
+    StacksHttpResponse,
+};
+use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum QuerySpecifier {
+    ConsensusHash(ConsensusHash),
+    BurnchainHeaderHash(BurnchainHeaderHash),
+    BlockHeight(u64),
+    Latest,
+}
+
+pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions";
+static PATH_REGEX: &str = "^/v3/sortitions(/(?P<key>[a-z_]{1,15})/(?P<value>[0-9a-f]{1,64}))?$";
+
+/// Struct for sortition information returned via the GetSortition API call
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
+pub struct SortitionInfo {
+    /// The burnchain header hash of the block that triggered this event.
+    #[serde(with = "prefix_hex")]
+    pub burn_block_hash: BurnchainHeaderHash,
+    /// The burn height of the block that triggered this event.
+    pub burn_block_height: u64,
+    /// The burn block time of the sortition
+    pub burn_header_timestamp: u64,
+    /// The sortition ID of the block that triggered this event. This incorporates
+    /// PoX forking information and the burn block hash to obtain an identifier that is
+    /// unique across PoX forks and burnchain forks.
+    #[serde(with = "prefix_hex")]
+    pub sortition_id: SortitionId,
+    /// The parent of this burn block's Sortition ID
+    #[serde(with = "prefix_hex")]
+    pub parent_sortition_id: SortitionId,
+    /// The consensus hash of the block that triggered this event. This incorporates
+    /// PoX forking information and burn op information to obtain an identifier that is
+    /// unique across PoX forks and burnchain forks.
+    #[serde(with = "prefix_hex")]
+    pub consensus_hash: ConsensusHash,
+    /// Boolean indicating whether or not there was a successful sortition (i.e. a winning
+    /// block or miner was chosen).
+    pub was_sortition: bool,
+    /// If sortition occurred, and the miner's VRF key registration
+    /// associated a nakamoto mining pubkey with their commit, this
+    /// will contain the Hash160 of that mining key.
+    #[serde(with = "prefix_opt_hex")]
+    pub miner_pk_hash160: Option<Hash160>,
+    /// If sortition occurred, this will be the consensus hash of the burn block corresponding
+    /// to the winning block commit's parent block ptr.
In 3.x, this is the consensus hash of + /// the tenure that this new burn block's miner will be building off of. + #[serde(with = "prefix_opt_hex")] + pub stacks_parent_ch: Option, + /// If sortition occurred, this will be the consensus hash of the most recent sortition before + /// this one. + #[serde(with = "prefix_opt_hex")] + pub last_sortition_ch: Option, + #[serde(with = "prefix_opt_hex")] + /// In Stacks 2.x, this is the winning block. + /// In Stacks 3.x, this is the first block of the parent tenure. + pub committed_block_hash: Option, +} + +impl TryFrom<(&str, &str)> for QuerySpecifier { + type Error = Error; + + fn try_from(value: (&str, &str)) -> Result { + let hex_str = if value.1.starts_with("0x") { + &value.1[2..] + } else { + value.1 + }; + match value.0 { + "consensus" => Ok(Self::ConsensusHash( + ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn" => Ok(Self::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex(hex_str) + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn_height" => Ok(Self::BlockHeight( + value + .1 + .parse::() + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + other => Err(Error::DecodeError(format!("Unknown query param: {other}"))), + } + } +} + +#[derive(Clone)] +pub struct GetSortitionHandler { + pub query: QuerySpecifier, +} + +impl GetSortitionHandler { + pub fn new() -> Self { + Self { + query: QuerySpecifier::Latest, + } + } +} +/// Decode the HTTP request +impl HttpRequest for GetSortitionHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(PATH_REGEX).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + self.query = QuerySpecifier::Latest; + if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; + } + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_SORTITION_INFO_PATH + } +} + +impl RPCRequestHandler for GetSortitionHandler { + /// Reset internal state + fn restart(&mut self) { + self.query = QuerySpecifier::Latest; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = + node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => { + Ok(Some(network.burnchain_tip.clone())) + }, + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + }, + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + }, + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + }, + }; + let sortition_sn = query_result? 
+                .ok_or_else(|| ChainError::NoSuchBlockError)?;
+
+            let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition {
+                let handle = sortdb.index_handle(&sortition_sn.sortition_id);
+                let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?;
+                (None, None, None, Some(last_sortition.consensus_hash))
+            } else {
+                let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)?
+                    .ok_or_else(|| {
+                        error!(
+                            "Failed to load block commit from Sortition DB for snapshot with a winning block txid";
+                            "sortition_id" => %sortition_sn.sortition_id,
+                            "txid" => %sortition_sn.winning_block_txid,
+                        );
+                        ChainError::NoSuchBlockError
+                    })?;
+                let handle = sortdb.index_handle(&sortition_sn.sortition_id);
+                let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())?
+                    .ok_or_else(|| {
+                        warn!(
+                            "Failed to load the snapshot of the winning block commit's parent";
+                            "sortition_id" => %sortition_sn.sortition_id,
+                            "txid" => %sortition_sn.winning_block_txid,
+                        );
+                        ChainError::NoSuchBlockError
+                    })?;
+
+                // try to figure out what the last snapshot in this fork was with a successful
+                // sortition.
+                // optimization heuristic: short-circuit the load if it's just `stacks_parent_sn`
+                let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 {
+                    stacks_parent_sn.consensus_hash.clone()
+                } else {
+                    // we actually need to perform the marf lookup
+                    let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?;
+                    last_sortition.consensus_hash
+                };
+
+                (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash),
+                 Some(last_sortition_ch))
+            };
+
+            Ok(SortitionInfo {
+                burn_block_hash: sortition_sn.burn_header_hash,
+                burn_block_height: sortition_sn.block_height,
+                burn_header_timestamp: sortition_sn.burn_header_timestamp,
+                sortition_id: sortition_sn.sortition_id,
+                parent_sortition_id: sortition_sn.parent_sortition_id,
+                consensus_hash: sortition_sn.consensus_hash,
+                was_sortition: sortition_sn.sortition,
+                miner_pk_hash160,
+                stacks_parent_ch,
+                last_sortition_ch,
+                committed_block_hash,
+            })
+        });
+
+        let block = match result {
+            Ok(block) => block,
+            Err(ChainError::NoSuchBlockError) => {
+                return StacksHttpResponse::new_error(
+                    &preamble,
+                    &HttpNotFound::new(format!("Could not find snapshot {:?}\n", &self.query)),
+                )
+                .try_into_contents()
+                .map_err(NetError::from)
+            }
+            Err(e) => {
+                // nope -- error trying to check
+                let msg = format!("Failed to load snapshot for {:?}: {:?}\n", &self.query, &e);
+                warn!("{msg}");
+                return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg))
+                    .try_into_contents()
+                    .map_err(NetError::from);
+            }
+        };
+
+        let preamble = HttpResponsePreamble::ok_json(&preamble);
+        let result = HttpResponseContents::try_from_json(&block)?;
+        Ok((preamble, result))
+    }
+}
+
+impl HttpResponse for GetSortitionHandler {
+    fn try_parse_response(
+        &self,
+        preamble: &HttpResponsePreamble,
+        body: &[u8],
+    ) -> Result<HttpResponsePayload, Error> {
+        let sortition_info: SortitionInfo = parse_json(preamble, body)?;
+        Ok(HttpResponsePayload::try_from_json(sortition_info)?)
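[Editor's aside: a hedged client-side sketch of consuming the new endpoint. The node URL and the `ureq` crate are illustrative assumptions; the field names, the single-object response, and the `0x`-prefixed hex encoding follow from `SortitionInfo` and the `prefix_hex`/`prefix_opt_hex` helpers.]

// Fetch the latest sortition from /v3/sortitions and read a few fields.
// Assumes a node RPC at localhost:20443 plus the `ureq` and `serde_json`
// crates; this is a sketch, not part of the PR.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body: serde_json::Value = ureq::get("http://localhost:20443/v3/sortitions")
        .call()?
        .into_json()?;
    let was_sortition = body["was_sortition"].as_bool().unwrap_or(false);
    let consensus_hash = body["consensus_hash"].as_str().unwrap_or_default();
    // Hashes serialize as 0x-prefixed hex via prefix_hex / prefix_opt_hex.
    assert!(consensus_hash.starts_with("0x"));
    println!("sortition? {was_sortition} at {consensus_hash}");
    Ok(())
}

The keyed forms `/v3/sortitions/consensus/<hex>`, `/v3/sortitions/burn/<hex>`, and `/v3/sortitions/burn_height/<n>` select the other `QuerySpecifier` variants.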
+ } +} diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4546b66fc93..4fd42340708 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -51,6 +51,38 @@ pub struct GetStackersResponse { pub stacker_set: RewardSet, } +pub enum GetStackersErrors { + NotAvailableYet(crate::chainstate::coordinator::Error), + Other(String), +} + +impl GetStackersErrors { + pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again"; + pub const OTHER_ERR_TYPE: &'static str = "other"; + + pub fn error_type_string(&self) -> &'static str { + match self { + Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE, + Self::Other(_) => Self::OTHER_ERR_TYPE, + } + } +} + +impl From<&str> for GetStackersErrors { + fn from(value: &str) -> Self { + GetStackersErrors::Other(value.into()) + } +} + +impl std::fmt::Display for GetStackersErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GetStackersErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"), + GetStackersErrors::Other(msg) => write!(f, "{msg}") + } + } +} + impl GetStackersResponse { pub fn load( sortdb: &SortitionDB, @@ -58,7 +90,7 @@ impl GetStackersResponse { tip: &StacksBlockId, burnchain: &Burnchain, cycle_number: u64, - ) -> Result { + ) -> Result { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); let pox_contract_name = burnchain @@ -74,16 +106,9 @@ impl GetStackersResponse { } let provider = OnChainRewardSetProvider::new(); - let stacker_set = provider.read_reward_set_nakamoto( - cycle_start_height, - chainstate, - burnchain, - sortdb, - tip, - true, - ).map_err( - |e| format!("Could not read reward set. Prepare phase may not have started for this cycle yet. 
Cycle = {cycle_number}, Err = {e:?}") - )?; + let stacker_set = provider + .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) } @@ -173,10 +198,13 @@ impl RPCRequestHandler for GetStackersRequestHandler { let response = match stacker_response { Ok(response) => response, - Err(err_str) => { + Err(error) => { return StacksHttpResponse::new_error( &preamble, - &HttpBadRequest::new_json(json!({"response": "error", "err_msg": err_str})), + &HttpBadRequest::new_json(json!({ + "response": "error", + "err_type": error.error_type_string(), + "err_msg": error.to_string()})), ) .try_into_contents() .map_err(NetError::from) @@ -227,3 +255,31 @@ impl StacksHttpResponse { Ok(response) } } + +#[cfg(test)] +mod test { + use super::GetStackersErrors; + + #[test] + // Test the formatting and error type strings of GetStackersErrors + fn get_stackers_errors() { + let not_available_err = GetStackersErrors::NotAvailableYet( + crate::chainstate::coordinator::Error::PoXNotProcessedYet, + ); + let other_err = GetStackersErrors::Other("foo".into()); + + assert_eq!( + not_available_err.error_type_string(), + GetStackersErrors::NOT_AVAILABLE_ERR_TYPE + ); + assert_eq!( + other_err.error_type_string(), + GetStackersErrors::OTHER_ERR_TYPE + ); + + assert!(not_available_err + .to_string() + .starts_with("Could not read reward set")); + assert_eq!(other_err.to_string(), "foo".to_string()); + } +} diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs index 5f732c65009..b8801e7d7c7 100644 --- a/stackslib/src/net/api/getstxtransfercost.rs +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -16,6 +16,7 @@ use std::io::{Read, Write}; +use clarity::vm::costs::ExecutionCost; use regex::{Captures, Regex}; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksBlockId, StacksPublicKey, @@ -23,6 +24,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{Hash160, Sha256Sum}; +use url::form_urlencoded; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; @@ -30,19 +32,23 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::StacksChainState; use crate::core::mempool::MemPoolDB; +use crate::net::api::postfeerate::RPCPostFeeRateRequestHandler; use crate::net::http::{ - parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, + parse_json, Error, HttpBadRequest, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, }; use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; use crate::net::p2p::PeerNetwork; -use crate::net::{Error as NetError, StacksNodeState}; +use crate::net::{Error as NetError, HttpServerError, StacksNodeState}; use crate::version_string; +pub(crate) const SINGLESIG_TX_TRANSFER_LEN: u64 = 180; + #[derive(Clone)] pub struct RPCGetStxTransferCostRequestHandler {} + impl RPCGetStxTransferCostRequestHandler { pub fn new() -> Self { Self {} @@ -74,7 +80,7 @@ impl HttpRequest for RPCGetStxTransferCostRequestHandler { ) -> Result { if preamble.get_content_length() 
!= 0 { return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body for GetInfo".to_string(), + "Invalid Http request: expected 0-length body".to_string(), )); } Ok(HttpRequestContents::new().query_string(query)) @@ -92,9 +98,57 @@ impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - // todo -- need to actually estimate the cost / length for token transfers - // right now, it just uses the minimum. - let fee = MINIMUM_TX_FEE_RATE_PER_BYTE; + // NOTE: The estimated length isn't needed per se because we're returning a fee rate, but + // we do need an absolute length to use the estimator (so supply a common one). + let estimated_len = SINGLESIG_TX_TRANSFER_LEN; + + let fee_resp = node.with_node_state(|_network, sortdb, _chainstate, _mempool, rpc_args| { + let tip = self.get_canonical_burn_chain_tip(&preamble, sortdb)?; + let stacks_epoch = self.get_stacks_epoch(&preamble, sortdb, tip.block_height)?; + + if let Some((_, fee_estimator, metric)) = rpc_args.get_estimators_ref() { + // STX transfer transactions have zero runtime cost + let estimated_cost = ExecutionCost::zero(); + let estimations = + RPCPostFeeRateRequestHandler::estimate_tx_fee_from_cost_and_length( + &preamble, + fee_estimator, + metric, + estimated_cost, + estimated_len, + stacks_epoch, + )? + .estimations; + if estimations.len() != 3 { + // logic bug, but treat as runtime error + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new( + "Logic error in fee estimation: did not get three estimates".into(), + ), + )); + } + + // safety -- checked estimations.len() == 3 above + let median_estimation = &estimations[1]; + + // NOTE: this returns the fee _rate_ + Ok(median_estimation.fee / estimated_len) + } else { + // unlike `POST /v2/fees/transaction`, this method can't fail due to the + // unavailability of cost estimation, so just assume the minimum fee. 
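[Editor's aside: the fee-rate arithmetic above is easy to sanity-check. A minimal sketch follows; the concrete fee numbers are made up, and `MINIMUM_TX_FEE_RATE_PER_BYTE` is assumed here to be 1 µSTX/byte.]

// The endpoint estimates a total fee for a standard single-sig STX transfer
// (assumed length SINGLESIG_TX_TRANSFER_LEN = 180 bytes, per the diff), takes
// the middle of the three estimates, and converts it back to a per-byte rate.
const SINGLESIG_TX_TRANSFER_LEN: u64 = 180;
const MINIMUM_TX_FEE_RATE_PER_BYTE: u64 = 1; // assumed value, for illustration

fn transfer_fee_rate(estimated_fees: Option<[u64; 3]>) -> u64 {
    match estimated_fees {
        // [low, middle, high] total-fee estimates for a 180-byte tx
        Some(fees) => fees[1] / SINGLESIG_TX_TRANSFER_LEN,
        // no estimator configured: fall back to the minimum rate
        None => MINIMUM_TX_FEE_RATE_PER_BYTE,
    }
}

fn main() {
    assert_eq!(transfer_fee_rate(Some([180, 360, 900])), 2);
    assert_eq!(transfer_fee_rate(None), 1);
}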
+                debug!("Fee and cost estimation not configured on this stacks node");
+                Ok(MINIMUM_TX_FEE_RATE_PER_BYTE)
+            }
+        });
+
+        let fee = match fee_resp {
+            Ok(fee) => fee,
+            Err(response) => {
+                return response.try_into_contents().map_err(NetError::from);
+            }
+        };
+
         let mut preamble = HttpResponsePreamble::ok_json(&preamble);
         preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height()));
         let body = HttpResponseContents::try_from_json(&fee)?;
@@ -116,13 +170,9 @@ impl HttpResponse for RPCGetStxTransferCostRequestHandler {
 
 impl StacksHttpRequest {
     pub fn new_get_stx_transfer_cost(host: PeerHost) -> StacksHttpRequest {
-        StacksHttpRequest::new_for_peer(
-            host,
-            "GET".into(),
-            "/v2/fees/transfer".into(),
-            HttpRequestContents::new(),
-        )
-        .expect("FATAL: failed to construct request from infallible data")
+        let contents = HttpRequestContents::new();
+        StacksHttpRequest::new_for_peer(host, "GET".into(), "/v2/fees/transfer".into(), contents)
+            .expect("FATAL: failed to construct request from infallible data")
     }
 }
 
diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs
index c3eb4493fee..24c3c87d71c 100644
--- a/stackslib/src/net/api/gettenure.rs
+++ b/stackslib/src/net/api/gettenure.rs
@@ -19,7 +19,7 @@ use std::{fs, io};
 
 use regex::{Captures, Regex};
 use serde::de::Error as de_Error;
-use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN};
+use stacks_common::codec::{StacksMessageCodec, MAX_PAYLOAD_LEN};
 use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId};
 use stacks_common::types::net::PeerHost;
 use stacks_common::util::hash::to_hex;
@@ -46,7 +46,7 @@ pub struct RPCNakamotoTenureRequestHandler {
     /// Block to start streaming from. It and its ancestors will be incrementally streamed until one of
     /// the following happens:
     /// * we reach the first block in the tenure
-    /// * we would exceed MAX_MESSAGE_LEN bytes transmitted if we started sending the next block
+    /// * we would exceed MAX_PAYLOAD_LEN bytes transmitted if we started sending the next block
     pub block_id: Option<StacksBlockId>,
     /// What's the final block ID to stream from?
/// Passed as `stop=` query parameter @@ -132,7 +132,7 @@ impl NakamotoTenureStream { self.total_sent = self .total_sent .saturating_add(self.block_stream.total_bytes); - if self.total_sent.saturating_add(parent_size) > MAX_MESSAGE_LEN.into() { + if self.total_sent.saturating_add(parent_size) > MAX_PAYLOAD_LEN.into() { // out of space to send this return Ok(false); } @@ -284,7 +284,7 @@ impl HttpResponse for RPCNakamotoTenureRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + let bytes = parse_bytes(preamble, body, MAX_PAYLOAD_LEN.into())?; Ok(HttpResponsePayload::Bytes(bytes)) } } diff --git a/stackslib/src/net/api/gettenureinfo.rs b/stackslib/src/net/api/gettenureinfo.rs index e03b6317f49..44d67a46791 100644 --- a/stackslib/src/net/api/gettenureinfo.rs +++ b/stackslib/src/net/api/gettenureinfo.rs @@ -22,7 +22,7 @@ use serde::de::Error as de_Error; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks_common::types::net::PeerHost; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use {serde, serde_json}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; @@ -116,15 +116,18 @@ impl RPCRequestHandler for RPCNakamotoTenureInfoRequestHandler { ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let info = node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { RPCGetTenureInfo { - consensus_hash: network.stacks_tip.0.clone(), + consensus_hash: network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: network.tenure_start_block_id.clone(), - parent_consensus_hash: network.parent_stacks_tip.0.clone(), + parent_consensus_hash: network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &network.parent_stacks_tip.0, - &network.parent_stacks_tip.1, + &network.parent_stacks_tip.consensus_hash, + &network.parent_stacks_tip.block_hash, ), - tip_block_id: StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1), - tip_height: network.stacks_tip.2, + tip_block_id: StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ), + tip_height: network.stacks_tip.height, reward_cycle: network .burnchain .block_height_to_reward_cycle(network.burnchain_tip.block_height) diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index f1af0a9e604..d256c15b975 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -16,7 +16,11 @@ use clarity::vm::costs::ExecutionCost; use stacks_common::codec::read_next; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::util::hash::Hash160; +use stacks_common::util::HexError; use crate::burnchains::Txid; use crate::chainstate::stacks::{StacksMicroblock, StacksTransaction}; @@ -32,6 +36,7 @@ use crate::net::Error as NetError; use crate::stacks_common::codec::StacksMessageCodec; pub mod callreadonly; +pub mod get_tenures_fork_info; pub mod getaccount; pub mod getattachment; pub mod getattachmentsinv; @@ -50,6 +55,7 @@ pub mod getmicroblocks_indexed; pub mod getmicroblocks_unconfirmed; pub mod getneighbors; pub mod getpoxinfo; +pub mod getsortition; pub mod 
getstackerdbchunk; pub mod getstackerdbmetadata; pub mod getstackers; @@ -60,6 +66,8 @@ pub mod gettransaction_unconfirmed; pub mod liststackerdbreplicas; pub mod postblock; pub mod postblock_proposal; +#[warn(unused_imports)] +pub mod postblock_v3; pub mod postfeerate; pub mod postmempoolquery; pub mod postmicroblock; @@ -109,8 +117,10 @@ impl StacksHttp { getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), ); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); + self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); + self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); @@ -121,12 +131,12 @@ impl StacksHttp { self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( self.block_proposal_token.clone(), )); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::default()); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); - self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); } } @@ -139,3 +149,84 @@ impl From for Error { } } } + +/// This module serde encodes and decodes optional byte fields in RPC +/// responses as Some(String) where the String is a `0x` prefixed +/// hex string. +pub mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val:x}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +/// This module serde encodes and decodes byte fields in RPC +/// responses as a String where the String is a `0x` prefixed +/// hex string. +pub mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val:x}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +pub trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! 
impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(StacksBlockId); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index b804af0576a..6c1d5526b5d 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -30,17 +30,17 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::net::PeerHost; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; use crate::chainstate::stacks::{ Error as ChainError, StacksBlock, StacksBlockHeader, StacksTransaction, TransactionPayload, @@ -63,6 +63,9 @@ use crate::net::{ }; use crate::util_lib::db::Error as DBError; +#[cfg(any(test, feature = "testing"))] +pub static TEST_VALIDATE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); + // This enum is used to supply a `reason_code` for validation // rejection responses. This is serialized as an enum with string // type (in jsonschema terminology). @@ -108,6 +111,18 @@ pub struct BlockValidateRejectReason { pub reason_code: ValidateRejectCode, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum BlockProposalResult { + Accepted, + Error, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockProposalResponse { + pub result: BlockProposalResult, + pub message: String, +} + impl From for BlockValidateRejectReason where T: Into, @@ -206,8 +221,8 @@ impl NakamotoBlockProposal { }); } - let burn_dbconn = sortdb.index_conn(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; @@ -236,6 +251,27 @@ impl NakamotoBlockProposal { reason_code: ValidateRejectCode::InvalidBlock, reason: "Invalid parent block".into(), })?; + + // Validate the block's timestamp. 
It must be: + // - Greater than the parent block's timestamp + // - Less than 15 seconds into the future + if let StacksBlockHeaderTypes::Nakamoto(parent_nakamoto_header) = + &parent_stacks_header.anchored_header + { + if self.block.header.timestamp <= parent_nakamoto_header.timestamp { + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block timestamp is not greater than parent block".into(), + }); + } + } + if self.block.header.timestamp > get_epoch_time_secs() + 15 { + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block timestamp is too far into the future".into(), + }); + } + let tenure_change = self .block .txs @@ -257,6 +293,7 @@ impl NakamotoBlockProposal { self.block.header.burn_spent, tenure_change, coinbase, + self.block.header.pox_treatment.len(), )?; let mut miner_tenure_info = @@ -304,7 +341,10 @@ impl NakamotoBlockProposal { block.header.miner_signature = self.block.header.miner_signature.clone(); block.header.signer_signature = self.block.header.signer_signature.clone(); - // Assuming `tx_nerkle_root` has been checked we don't need to hash the whole block + // Clone the timestamp from the block proposal, which has already been validated + block.header.timestamp = self.block.header.timestamp; + + // Assuming `tx_merkle_root` has been checked we don't need to hash the whole block let expected_block_header_hash = self.block.header.block_hash(); let computed_block_header_hash = block.header.block_hash(); @@ -323,6 +363,24 @@ impl NakamotoBlockProposal { }); } + #[cfg(any(test, feature = "testing"))] + { + if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block validation is stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + } + } + info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, @@ -454,6 +512,15 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { .take() .ok_or(NetError::SendError("`block_proposal` not set".into()))?; + info!( + "Received block proposal request"; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_header_hash" => %block_proposal.block.header.block_hash(), + "height" => block_proposal.block.header.chain_length, + "tx_count" => block_proposal.block.txs.len(), + "parent_stacks_block_id" => %block_proposal.block.header.parent_block_id, + ); + let res = node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { if network.is_proposal_thread_running() { return Err(( @@ -518,7 +585,7 @@ impl HttpResponse for RPCBlockProposalRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let response: BlockValidateResponse = parse_json(preamble, body)?; + let response: BlockProposalResponse = parse_json(preamble, body)?; HttpResponsePayload::try_from_json(response) } } diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs new file mode 100644 index 00000000000..bcf994d4884 --- /dev/null +++ b/stackslib/src/net/api/postblock_v3.rs @@ -0,0 +1,189 @@ +// Copyright (C) 2024 Stacks Open Internet 
Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::net::PeerHost; + +use super::postblock::StacksBlockAcceptedData; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpError, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::relay::Relayer; +use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; + +pub static PATH: &'static str = "/v3/blocks/upload/"; + +#[derive(Clone, Default)] +pub struct RPCPostBlockRequestHandler { + pub block: Option, +} + +impl RPCPostBlockRequestHandler { + /// Decode a bare block from the body + fn parse_postblock_octets(mut body: &[u8]) -> Result { + let block = NakamotoBlock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + })?; + Ok(block) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostBlockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!("^{PATH}$")).unwrap() + } + + fn metrics_identifier(&self) -> &str { + PATH + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock takes application/octet-stream".to_string(), + )); + } + + let block = Self::parse_postblock_octets(body)?; + + self.block = Some(block); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostBlockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // get out the request body + let block = self + .block + .take() + .ok_or(NetError::SendError("`block` not set".into()))?; + + let response = node + .with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + let mut handle_conn = sortdb.index_handle_at_tip(); + let stacks_tip = network.stacks_tip.block_id(); + Relayer::process_new_nakamoto_block( + &network.burnchain, + &sortdb, + &mut handle_conn, + chainstate, + &stacks_tip, + &block, + None, + NakamotoBlockObtainMethod::Uploaded, + ) + }) + .map_err(|e| { + StacksHttpResponse::new_error(&preamble, &HttpError::new(400, e.to_string())) + }); + + let data_resp = match response { + Ok(accepted) => StacksBlockAcceptedData { + accepted, + stacks_block_id: block.block_id(), + }, + Err(e) => { + return e.try_into_contents().map_err(NetError::from); + } + }; + + // should set to relay... 
+ if data_resp.accepted { + node.set_relay_message(StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![block], + })); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostBlockRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let accepted: StacksBlockAcceptedData = parse_json(preamble, body)?; + HttpResponsePayload::try_from_json(accepted) + } +} + +impl StacksHttpRequest { + /// Make a new post-block request + pub fn new_post_block_v3(host: PeerHost, block: &NakamotoBlock) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + PATH.into(), + HttpRequestContents::new().payload_stacks(block), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/postfeerate.rs b/stackslib/src/net/api/postfeerate.rs index ab9691fdec0..376d8bf3da7 100644 --- a/stackslib/src/net/api/postfeerate.rs +++ b/stackslib/src/net/api/postfeerate.rs @@ -34,7 +34,9 @@ use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::TransactionPayload; use crate::core::mempool::MemPoolDB; -use crate::cost_estimates::FeeRateEstimate; +use crate::core::StacksEpoch; +use crate::cost_estimates::metrics::CostMetric; +use crate::cost_estimates::{CostEstimator, FeeEstimator, FeeRateEstimate}; use crate::net::http::{ parse_json, Error, HttpBadRequest, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, @@ -92,6 +94,7 @@ pub struct RPCPostFeeRateRequestHandler { pub estimated_len: Option, pub transaction_payload: Option, } + impl RPCPostFeeRateRequestHandler { pub fn new() -> Self { Self { @@ -99,6 +102,48 @@ impl RPCPostFeeRateRequestHandler { transaction_payload: None, } } + + /// Estimate a transaction fee, given its execution cost estimation and length estimation + /// and cost estimators. 
+ /// Returns Ok(fee structure) on success + /// Returns Err(HTTP response) on error + pub fn estimate_tx_fee_from_cost_and_length( + preamble: &HttpRequestPreamble, + fee_estimator: &dyn FeeEstimator, + metric: &dyn CostMetric, + estimated_cost: ExecutionCost, + estimated_len: u64, + stacks_epoch: StacksEpoch, + ) -> Result { + let scalar_cost = + metric.from_cost_and_len(&estimated_cost, &stacks_epoch.block_limit, estimated_len); + let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new(format!( + "Estimator RPC endpoint failed to estimate fees for tx: {:?}", + &e + )), + ) + })?; + + let mut estimations = RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); + + let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; + + for estimate in estimations.iter_mut() { + if estimate.fee < minimum_fee { + estimate.fee = minimum_fee; + } + } + + Ok(RPCFeeEstimateResponse { + estimated_cost, + estimations, + estimated_cost_scalar: scalar_cost, + cost_scalar_change_by_byte: metric.change_per_byte(), + }) + } } /// Decode the HTTP request @@ -206,39 +251,14 @@ impl RPCRequestHandler for RPCPostFeeRateRequestHandler { ) })?; - let scalar_cost = metric.from_cost_and_len( - &estimated_cost, - &stacks_epoch.block_limit, - estimated_len, - ); - let fee_rates = fee_estimator.get_rate_estimates().map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpBadRequest::new(format!( - "Estimator RPC endpoint failed to estimate fees for tx {}: {:?}", - &tx.name(), - &e - )), - ) - })?; - - let mut estimations = - RPCFeeEstimate::estimate_fees(scalar_cost, fee_rates).to_vec(); - - let minimum_fee = estimated_len * MINIMUM_TX_FEE_RATE_PER_BYTE; - - for estimate in estimations.iter_mut() { - if estimate.fee < minimum_fee { - estimate.fee = minimum_fee; - } - } - - Ok(RPCFeeEstimateResponse { + Self::estimate_tx_fee_from_cost_and_length( + &preamble, + fee_estimator, + metric, estimated_cost, - estimations, - estimated_cost_scalar: scalar_cost, - cost_scalar_change_by_byte: metric.change_per_byte(), - }) + estimated_len, + stacks_epoch, + ) } else { debug!("Fee and cost estimation not configured on this stacks node"); Err(StacksHttpResponse::new_error( diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 1e8caa18430..21558632208 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -29,6 +29,7 @@ use url::form_urlencoded; use {serde, serde_json}; use crate::burnchains::Txid; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{Error as ChainError, StacksTransaction}; use crate::core::mempool::{decode_tx_stream, MemPoolDB, MemPoolSyncData}; @@ -89,8 +90,8 @@ pub struct StacksMemPoolStream { pub num_txs: u64, /// maximum we can visit in the query pub max_txs: u64, - /// height of the chain at time of query - pub height: u64, + /// coinbase height of the chain at time of query + pub coinbase_height: u64, /// Are we done sending transactions, and are now in the process of sending the trailing page /// ID? 
pub corked: bool, @@ -105,7 +106,7 @@ impl StacksMemPoolStream { mempool_db: DBConn, tx_query: MemPoolSyncData, max_txs: u64, - height: u64, + coinbase_height: u64, page_id_opt: Option, ) -> Self { let last_randomized_txid = page_id_opt.unwrap_or_else(|| { @@ -118,7 +119,7 @@ impl StacksMemPoolStream { last_randomized_txid: last_randomized_txid, num_txs: 0, max_txs: max_txs, - height: height, + coinbase_height, corked: false, finished: false, mempool_db, @@ -159,7 +160,7 @@ impl HttpChunkGenerator for StacksMemPoolStream { MemPoolDB::static_find_next_missing_transactions( &self.mempool_db, &self.tx_query, - self.height, + self.coinbase_height, &self.last_randomized_txid, 1, remaining, @@ -275,12 +276,18 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let height = self.get_stacks_chain_tip(&preamble, sortdb, chainstate).map(|hdr| hdr.anchored_header.height()).unwrap_or(0); + let header = self.get_stacks_chain_tip(&preamble, sortdb, chainstate) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e))))?; + + let coinbase_height = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &header.index_block_hash()) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? + .unwrap_or(0); + let max_txs = network.connection_opts.mempool_max_tx_query; debug!( "Begin mempool query"; "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), - "block_height" => height, + "coinbase_height" => coinbase_height, "max_txs" => max_txs ); @@ -291,7 +298,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { } }; - Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, height, page_id)) + Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, coinbase_height, page_id)) }); let stream = match stream_res { diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs new file mode 100644 index 00000000000..2b5abcfb362 --- /dev/null +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -0,0 +1,78 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
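[Editor's aside: a note on consuming the paged mempool stream above. Each response is a transaction stream followed by a trailing page ID (the last randomized txid), which the client echoes back until a page comes back empty. A hedged sketch; `fetch_page` is a hypothetical stand-in for the POST to `/v2/mempool/query`.]

// `fetch_page` is hypothetical: it would POST the MemPoolSyncData query plus
// the optional page ID to /v2/mempool/query and decode the returned stream.
fn fetch_page(page_id: Option<[u8; 32]>) -> (Vec<Vec<u8>>, Option<[u8; 32]>) {
    // ... network call elided; returns (raw txs, trailing page id) ...
    let _ = page_id;
    (vec![], None)
}

fn main() {
    let mut page_id = None;
    let mut all_txs = Vec::new();
    loop {
        let (txs, next_page) = fetch_page(page_id);
        if txs.is_empty() {
            break; // no new transactions under this query
        }
        all_txs.extend(txs);
        page_id = next_page; // echo the trailing page ID back to the node
    }
    println!("synced {} transactions", all_txs.len());
}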
+ +use std::collections::BTreeMap; +use std::fmt::Display; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::get_tenures_fork_info::GetTenuresForkInfo; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(start: &T, stop: &R) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/tenures/fork_info/{start}/{stop}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetTenuresForkInfo::default(); + + let tests = vec![ + ( + make_preamble(&ConsensusHash([0; 20]), &ConsensusHash([255; 20])), + Ok((ConsensusHash([0; 20]), ConsensusHash([255; 20]))), + ), + ( + make_preamble(&BurnchainHeaderHash([0; 32]), &ConsensusHash([255; 20])), + Err(NetError::NotFoundError), + ), + ( + make_preamble(&ConsensusHash([255; 20]), &BurnchainHeaderHash([0; 32])), + Err(NetError::NotFoundError), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok((start, stop)) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.start_sortition, &Some(start)); + assert_eq!(&handler.stop_sortition, &Some(stop)); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/getblock.rs b/stackslib/src/net/api/tests/getblock.rs index c873c526206..d670b55edc6 100644 --- a/stackslib/src/net/api/tests/getblock.rs +++ b/stackslib/src/net/api/tests/getblock.rs @@ -18,7 +18,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName}; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index da1ca4ba19d..7d8aeff01c8 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -63,10 +63,10 @@ fn test_try_parse_request() { #[test] fn test_getinfo_compat() { - let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux 
[x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null}"#; - let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; - let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux 
[x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; + let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; // they all parse for json_obj in &[ diff --git a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs index a4eb372abf5..421264fd9a0 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs +++ 
b/stackslib/src/net/api/tests/getmicroblocks_confirmed.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::db::blocks::test::*; @@ -91,13 +91,13 @@ fn test_try_make_response() { ) .unwrap(); - let parent_block = make_codec_test_block(25); + let parent_block = make_codec_test_block(25, StacksEpochId::latest()); let parent_consensus_hash = ConsensusHash([0x02; 20]); let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); mblocks.truncate(15); - let mut child_block = make_codec_test_block(25); + let mut child_block = make_codec_test_block(25, StacksEpochId::latest()); let child_consensus_hash = ConsensusHash([0x03; 20]); child_block.header.parent_block = parent_block.block_hash(); diff --git a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs index 0676ecc4978..aba7fd5c237 100644 --- a/stackslib/src/net/api/tests/getmicroblocks_indexed.rs +++ b/stackslib/src/net/api/tests/getmicroblocks_indexed.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, }; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::db::blocks::test::*; @@ -89,7 +89,7 @@ fn test_try_make_response() { "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", ) .unwrap(); - let parent_block = make_codec_test_block(25); + let parent_block = make_codec_test_block(25, StacksEpochId::latest()); let parent_consensus_hash = ConsensusHash([0x02; 20]); let parent_index_block_hash = StacksBlockHeader::make_index_block_hash( &parent_consensus_hash, @@ -99,7 +99,7 @@ fn test_try_make_response() { let mut mblocks = make_sample_microblock_stream(&privk, &parent_block.block_hash()); mblocks.truncate(15); - let mut child_block = make_codec_test_block(25); + let mut child_block = make_codec_test_block(25, StacksEpochId::latest()); let child_consensus_hash = ConsensusHash([0x03; 20]); child_block.header.parent_block = parent_block.block_hash(); diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs new file mode 100644 index 00000000000..8541b73eb6d --- /dev/null +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -0,0 +1,101 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
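The new `getsortition.rs` tests below exercise the `/v3/sortitions` endpoint, which takes either no qualifier (the latest sortition) or exactly one of `/consensus/<hash>`, `/burn/<hash>`, or `/burn_height/<height>`; supplying two qualifiers at once is rejected. A minimal sketch of that dispatch, with a simplified stand-in for `QuerySpecifier` (the enum, function, and error strings here are illustrative, not the handler's actual internals):

```rust
// Illustrative sketch only: models the four query forms the tests below
// exercise. `Query` stands in for the handler's QuerySpecifier.
#[derive(Debug, PartialEq)]
enum Query {
    Latest,
    ConsensusHash(String),
    BurnchainHeaderHash(String),
    BlockHeight(u64),
}

fn parse_sortition_query(suffix: &str) -> Result<Query, String> {
    let parts: Vec<&str> = suffix.split('/').filter(|s| !s.is_empty()).collect();
    match parts.as_slice() {
        // "/v3/sortitions" with no qualifier: the latest sortition
        [] => Ok(Query::Latest),
        // consensus hashes are 20 bytes, i.e. 40 hex characters
        ["consensus", ch] if ch.len() == 40 => Ok(Query::ConsensusHash(ch.to_string())),
        // burnchain header hashes are 32 bytes, i.e. 64 hex characters
        ["burn", bhh] if bhh.len() == 64 => Ok(Query::BurnchainHeaderHash(bhh.to_string())),
        ["burn_height", h] => h
            .parse::<u64>()
            .map(Query::BlockHeight)
            .map_err(|e| e.to_string()),
        // anything else (bad lengths, two qualifiers at once) is rejected
        _ => Err("unrecognized sortition query".into()),
    }
}

fn main() {
    assert_eq!(parse_sortition_query(""), Ok(Query::Latest));
    assert_eq!(parse_sortition_query("/burn_height/100"), Ok(Query::BlockHeight(100)));
    assert!(parse_sortition_query("/burn_height/a1be").is_err());
    assert!(parse_sortition_query(
        "/burn_height/20/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"
    )
    .is_err());
}
```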
+ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(query: &str) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/sortitions{query}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetSortitionHandler::new(); + + let tests = vec![ + (make_preamble(""), Ok(QuerySpecifier::Latest)), + ( + make_preamble("/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), + Ok(QuerySpecifier::ConsensusHash( + ConsensusHash::from_hex("deadbeef00deadbeef01deadbeef02deadbeef03").unwrap(), + )), + ), + ( + make_preamble("/burn/00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), + Ok(QuerySpecifier::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex( + "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff", + ) + .unwrap(), + )), + ), + ( + make_preamble("/burn_height/100"), + Ok(QuerySpecifier::BlockHeight(100)), + ), + ( + make_preamble("/burn_height/a1be"), + Err(HttpError::DecodeError("invalid digit found in string".into()).into()), + ), + ( + make_preamble("/burn/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), + ), + ( + make_preamble("/consensus/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), + ), + ( + make_preamble("/burn_height/20/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), + Err(NetError::NotFoundError), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + eprintln!("{}", &inp.path_and_query_str); + eprintln!("{parsed_request:?}"); + match expected_result { + Ok(query) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.query, &query); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/getstxtransfercost.rs b/stackslib/src/net/api/tests/getstxtransfercost.rs index 6c4cccc369b..66e557f413b 100644 --- a/stackslib/src/net/api/tests/getstxtransfercost.rs +++ b/stackslib/src/net/api/tests/getstxtransfercost.rs @@ -25,6 +25,7 @@ use stacks_common::types::Address; use super::test_rpc; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::getstxtransfercost::SINGLESIG_TX_TRANSFER_LEN; use crate::net::api::*; use crate::net::connection::ConnectionOptions; use crate::net::httpcore::{ @@ -67,6 +68,7 @@ fn test_try_make_response() { let mut responses = test_rpc(function_name!(), vec![request]); assert_eq!(responses.len(), 1); + responses.reverse(); let response = responses.pop().unwrap(); debug!( @@ -80,5 +82,6 @@ fn test_try_make_response() { ); 
let fee_rate = response.decode_stx_transfer_fee().unwrap(); + debug!("fee_rate = {:?}", &fee_rate); assert_eq!(fee_rate, MINIMUM_TX_FEE_RATE_PER_BYTE); } diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e4..f0a537d045e 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -22,10 +22,11 @@ use libstackerdb::SlotMetadata; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, + StacksPrivateKey, StacksPublicKey, }; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::pipe::Pipe; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -40,6 +41,7 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, }; use crate::core::MemPoolDB; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::db::PeerDB; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; @@ -47,10 +49,12 @@ use crate::net::rpc::ConversationHttp; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; use crate::net::{ - Attachment, AttachmentInstance, RPCHandlerArgs, StackerDBConfig, StacksNodeState, UrlString, + Attachment, AttachmentInstance, MemPoolEventDispatcher, RPCHandlerArgs, StackerDBConfig, + StacksNodeState, UrlString, }; mod callreadonly; +mod get_tenures_fork_info; mod getaccount; mod getattachment; mod getattachmentsinv; @@ -69,6 +73,7 @@ mod getmicroblocks_indexed; mod getmicroblocks_unconfirmed; mod getneighbors; mod getpoxinfo; +mod getsortition; mod getstackerdbchunk; mod getstackerdbmetadata; mod getstxtransfercost; @@ -77,6 +82,8 @@ mod gettenureinfo; mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; +mod postblock_proposal; +mod postblock_v3; mod postfeerate; mod postmempoolquery; mod postmicroblock; @@ -258,6 +265,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_1_config.connection_opts.maximum_call_argument_size = 4096; + peer_1_config.connection_opts.block_proposal_token = Some("password".to_string()); peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, @@ -267,6 +275,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_2_config.connection_opts.maximum_call_argument_size = 4096; + peer_2_config.connection_opts.block_proposal_token = Some("password".to_string()); // stacker DBs get initialized thru reconfiguration when the above block gets processed peer_1_config.add_stacker_db( @@ -454,7 +463,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone(), tx_contract_signed.clone()], ) .unwrap(); @@ -477,7 +486,7 @@ impl<'a> TestRPC<'a> { let sortdb = peer_1.sortdb.take().unwrap(); Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); let mblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut 
microblock_builder = StacksMicroblockBuilder::new( stacks_block.block_hash(), consensus_hash.clone(), @@ -529,11 +538,11 @@ impl<'a> TestRPC<'a> { let sortdb2 = peer_2.sortdb.take().unwrap(); peer_1 .chainstate() - .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb1.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_2 .chainstate() - .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb2.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_1.sortdb = Some(sortdb1); peer_2.sortdb = Some(sortdb2); @@ -591,6 +600,7 @@ impl<'a> TestRPC<'a> { peer_1.chainstate(), &consensus_hash, &stacks_block.block_hash(), + true, txid.clone(), tx_bytes, tx_fee, @@ -732,7 +742,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone()], ) .unwrap(); @@ -907,9 +917,17 @@ impl<'a> TestRPC<'a> { } } + pub fn run(self, requests: Vec<StacksHttpRequest>) -> Vec<StacksHttpResponse> { + self.run_with_observer(requests, None) + } + /// Run zero or more HTTP requests on this setup RPC test harness. /// Return the list of responses. - pub fn run(self, requests: Vec<StacksHttpRequest>) -> Vec<StacksHttpResponse> { + pub fn run_with_observer( + self, + requests: Vec<StacksHttpRequest>, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Vec<StacksHttpResponse> { let mut peer_1 = self.peer_1; let mut peer_2 = self.peer_2; let peer_1_indexer = self.peer_1_indexer; @@ -943,13 +961,15 @@ impl<'a> TestRPC<'a> { } { - let rpc_args = RPCHandlerArgs::default(); + let mut rpc_args = RPCHandlerArgs::default(); + rpc_args.event_observer = event_observer; let mut node_state = StacksNodeState::new( &mut peer_1.network, &peer_1_sortdb, &mut peer_1_stacks_node.chainstate, &mut peer_1_mempool, &rpc_args, + false, ); convo_1.chat(&mut node_state).unwrap(); } @@ -985,13 +1005,15 @@ impl<'a> TestRPC<'a> { } { - let rpc_args = RPCHandlerArgs::default(); + let mut rpc_args = RPCHandlerArgs::default(); + rpc_args.event_observer = event_observer; let mut node_state = StacksNodeState::new( &mut peer_2.network, &peer_2_sortdb, &mut peer_2_stacks_node.chainstate, &mut peer_2_mempool, &rpc_args, + false, ); convo_2.chat(&mut node_state).unwrap(); } @@ -1038,6 +1060,7 @@ impl<'a> TestRPC<'a> { &mut peer_1_stacks_node.chainstate, &mut peer_1_mempool, &rpc_args, + false, ); convo_1.chat(&mut node_state).unwrap(); } @@ -1073,3 +1096,95 @@ pub fn test_rpc(test_name: &str, requests: Vec<StacksHttpRequest>) -> Vec<StacksHttpResponse> { + let out: Option<BurnchainHeaderHash> = + prefix_opt_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + if test.is_some() { + assert_eq!( + hex_str, + format!("\"0x{}\"", to_hex(&inp.as_ref().unwrap().0)) + ); + } else { + assert_eq!(hex_str, "null"); + } + } +} + +#[test] +fn prefixed_hex_bad_desers() { + let inp = "\"1\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "invalid length 1, expected at least length 2 string".to_string(), + ); + let inp = "\"0x\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 0 for hex string".to_string(), + ); + let inp = "\"0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff00\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + 
prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 66 for hex string".to_string(), + ); +} + +#[test] +fn prefixed_hex_serialization() { + let tests_32b = [ + [0u8; 32], + [1; 32], + [15; 32], + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, + ], + ]; + + for test in tests_32b.iter() { + let inp = BurnchainHeaderHash(test.clone()); + let mut out_buff = Vec::new(); + let mut serializer = serde_json::Serializer::new(&mut out_buff); + prefix_hex::serialize(&inp, &mut serializer).unwrap(); + let hex_str = String::from_utf8(out_buff).unwrap(); + eprintln!("{hex_str}"); + + let mut deserializer = serde_json::Deserializer::from_str(&hex_str); + let out: BurnchainHeaderHash = prefix_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + assert_eq!(hex_str, format!("\"0x{}\"", to_hex(&inp.0))); + } +} diff --git a/stackslib/src/net/api/tests/postblock.rs b/stackslib/src/net/api/tests/postblock.rs index 287e97f613b..7412df93349 100644 --- a/stackslib/src/net/api/tests/postblock.rs +++ b/stackslib/src/net/api/tests/postblock.rs @@ -20,7 +20,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddre use clarity::vm::{ClarityName, ContractName, Value}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use super::TestRPC; use crate::chainstate::stacks::test::make_codec_test_block; @@ -38,7 +38,7 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let block = make_codec_test_block(3); + let block = make_codec_test_block(3, StacksEpochId::Epoch25); let request = StacksHttpRequest::new_post_block(addr.into(), ConsensusHash([0x11; 20]), block.clone()); let bytes = request.try_serialize().unwrap(); diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs new file mode 100644 index 00000000000..6ab465a683c --- /dev/null +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -0,0 +1,426 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
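The `prefix_hex`/`prefix_opt_hex` round-trip tests above pin down the wire convention for hash fields in the new sortition API: a value serializes as a quoted, `0x`-prefixed hex string of fixed length, `None` serializes as JSON `null`, and over- or under-sized strings are rejected with the length errors asserted above. A standalone sketch of that convention, with free functions standing in for the real serde adapter modules (names and exact error strings are illustrative):

```rust
// Sketch of the 0x-prefixed hex convention the tests above assert.
// Stand-in for stackslib's prefix_hex module; not the actual implementation.
fn to_prefixed_hex(bytes: &[u8]) -> String {
    let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
    format!("0x{hex}")
}

fn from_prefixed_hex(s: &str, expected_len: usize) -> Result<Vec<u8>, String> {
    // must start with "0x"; a shorter string cannot even hold the prefix
    let hex = s
        .strip_prefix("0x")
        .ok_or_else(|| format!("invalid length {}, expected at least length 2 string", s.len()))?;
    // the hex payload must encode exactly expected_len bytes
    if hex.len() != expected_len * 2 {
        return Err(format!("bad length {} for hex string", hex.len()));
    }
    (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).map_err(|e| e.to_string()))
        .collect()
}

fn main() {
    let h = [0u8; 32];
    let s = to_prefixed_hex(&h);
    assert_eq!(s, format!("0x{}", "00".repeat(32)));
    assert_eq!(from_prefixed_hex(&s, 32).unwrap(), h.to_vec());
    assert!(from_prefixed_hex("0x", 32).is_err()); // bad length 0
    assert!(from_prefixed_hex("1", 32).is_err()); // missing 0x prefix
}
```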
+ +use std::cell::RefCell; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::rc::Rc; +use std::sync::{Arc, Condvar, Mutex}; + +use clarity::types::chainstate::{StacksPrivateKey, TrieHash}; +use clarity::util::secp256k1::MessageSignature; +use clarity::util::vrf::VRFProof; +use clarity::vm::ast::ASTRules; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use mempool::{MemPoolDB, MemPoolEventDispatcher, ProposalCallbackReceiver}; +use postblock_proposal::{NakamotoBlockProposal, ValidateRejectCode}; +use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; + +use super::TestRPC; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction}; +use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_nakamoto_block}; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlockHeader, StacksTransactionSigner, TenureChangeCause, + TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionVersion, +}; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::relay::Relayer; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &StacksPrivateKey::new()); + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + let mut request = StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = + postblock_proposal::RPCBlockProposalRequestHandler::new(Some("password".into())); + + // missing authorization header + let bad_request = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_request { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + _ => panic!("expected error"), + } + + // add the authorization header + request.add_header("authorization".into(), "password".into()); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut parsed_request = http + .handle_try_parse_request( + 
&mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.block_proposal, + Some(NakamotoBlockProposal { + block, + chain_id: 0x80000000 + }) + ); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + // but the authorization header should still be there + parsed_request.add_header("authorization".into(), "password".into()); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.auth.is_some()); + assert!(handler.block_proposal.is_none()); +} + +struct ProposalObserver { + results: Mutex< + Vec<Result<postblock_proposal::BlockValidateOk, postblock_proposal::BlockValidateReject>>, + >, + condvar: Condvar, +} + +impl ProposalObserver { + fn new() -> Self { + Self { + results: Mutex::new(vec![]), + condvar: Condvar::new(), + } + } +} + +impl ProposalCallbackReceiver for ProposalObserver { + fn notify_proposal_result( + &self, + result: Result< + postblock_proposal::BlockValidateOk, + postblock_proposal::BlockValidateReject, + >, + ) { + let mut results = self.results.lock().unwrap(); + results.push(result); + self.condvar.notify_one(); + } +} + +struct ProposalTestObserver { + pub proposal_observer: Arc<Mutex<ProposalObserver>>, +} + +impl ProposalTestObserver { + fn new() -> Self { + Self { + proposal_observer: Arc::new(Mutex::new(ProposalObserver::new())), + } + } +} + +impl ProposalCallbackReceiver for Arc<Mutex<ProposalObserver>> { + fn notify_proposal_result( + &self, + result: Result< + postblock_proposal::BlockValidateOk, + postblock_proposal::BlockValidateReject, + >, + ) { + let observer = self.lock().unwrap(); + observer.notify_proposal_result(result); + } +} + +impl MemPoolEventDispatcher for ProposalTestObserver { + fn get_proposal_callback_receiver(&self) -> Option<Box<dyn ProposalCallbackReceiver>> { + Some(Box::new(Arc::clone(&self.proposal_observer))) + } + + fn mempool_txs_dropped(&self, txids: Vec<Txid>, reason: mempool::MemPoolDropReason) {} + + fn mined_block_event( + &self, + target_burn_height: u64, + block: &crate::chainstate::stacks::StacksBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + confirmed_microblock_cost: &ExecutionCost, + tx_results: Vec<TransactionEvent>, + ) { + } + + fn mined_microblock_event( + &self, + microblock: &StacksMicroblock, + tx_results: Vec<TransactionEvent>, + anchor_block_consensus_hash: ConsensusHash, + anchor_block: BlockHeaderHash, + ) { + } + + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &crate::chainstate::nakamoto::NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_results: Vec<TransactionEvent>, + ) { + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let test_observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + let mut requests = vec![]; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + + let mut block = { + let chainstate = rpc_test.peer_1.chainstate(); + let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &tip.get_canonical_stacks_block_id(), + ) + .unwrap() + .unwrap(); + + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let privk = 
StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + + let stx_address = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let payload = TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([0u8; 34]), + ); + + let auth = TransactionAuth::from_p2pkh(miner_privk).unwrap(); + let addr = auth.origin().address_testnet(); + let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + tx.chain_id = 0x80000000; + tx.auth.set_origin_nonce(34); + tx.set_post_condition_mode(TransactionPostConditionMode::Allow); + tx.set_tx_fee(300); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(miner_privk).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &tip.consensus_hash, + 25000, + None, + None, + 8, + ) + .unwrap(); + + rpc_test + .peer_1 + .with_db_state( + |sort_db: &mut SortitionDB, + chainstate: &mut StacksChainState, + _: &mut Relayer, + _: &mut MemPoolDB| { + let burn_dbconn = sort_db.index_handle_at_tip(); + let mut miner_tenure_info = builder + .load_tenure_info(chainstate, &burn_dbconn, None) + .unwrap(); + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + builder.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx.tx_len(), + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + ); + let block = builder.mine_nakamoto_block(&mut tenure_tx); + Ok(block) + }, + ) + .unwrap() + }; + + // Increment the timestamp by 1 to ensure it is different from the previous block + block.header.timestamp += 1; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the valid block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // Set the timestamp to a value in the past + block.header.timestamp -= 10000; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the invalid block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // Set the timestamp to a value in the future + block.header.timestamp += 20000; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the invalid block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // execute the requests + let 
observer = ProposalTestObserver::new(); + let proposal_observer = Arc::clone(&observer.proposal_observer); + + let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); + + let response = responses.remove(0); + + // Wait for the results to be non-empty + loop { + if proposal_observer + .lock() + .unwrap() + .results + .lock() + .unwrap() + .len() + < 3 + { + std::thread::sleep(std::time::Duration::from_secs(1)); + } else { + break; + } + } + + let observer = proposal_observer.lock().unwrap(); + let mut results = observer.results.lock().unwrap(); + + let result = results.remove(0); + assert!(result.is_ok()); + + let result = results.remove(0); + match result { + Ok(_) => panic!("expected error"), + Err(postblock_proposal::BlockValidateReject { + reason_code, + reason, + .. + }) => { + assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason, "Block timestamp is not greater than parent block"); + } + } + + let result = results.remove(0); + match result { + Ok(_) => panic!("expected error"), + Err(postblock_proposal::BlockValidateReject { + reason_code, + reason, + .. + }) => { + assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason, "Block timestamp is too far into the future"); + } + } +} diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs new file mode 100644 index 00000000000..e68d334239d --- /dev/null +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -0,0 +1,165 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
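The three proposals submitted above pin down the block-timestamp rule the proposal endpoint enforces: a block's header timestamp must strictly exceed its parent's, and must not run ahead of the node's wall clock by more than a small drift bound. A sketch of a check with that shape; the 15-second bound and the function name are assumptions for illustration, not the validator's actual constants:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Hypothetical drift bound for illustration; the node defines its own constant.
const MAX_FUTURE_DRIFT_SECS: u64 = 15;

fn check_timestamp(parent_timestamp: u64, block_timestamp: u64) -> Result<(), String> {
    // must move strictly forward relative to the parent block
    if block_timestamp <= parent_timestamp {
        return Err("Block timestamp is not greater than parent block".into());
    }
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before epoch")
        .as_secs();
    // must not run ahead of this node's wall clock by more than the drift bound
    if block_timestamp > now + MAX_FUTURE_DRIFT_SECS {
        return Err("Block timestamp is too far into the future".into());
    }
    Ok(())
}
```

This matches the two rejection reasons asserted at the end of the test: the proposal whose timestamp was moved into the past fails the first branch, and the one pushed into the future fails the second.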
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress, StacksPrivateKey}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::{Address, StacksEpochId}; + +use super::TestRPC; +use crate::chainstate::stacks::test::make_codec_test_nakamoto_block; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let miner_sk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4, 5, 6, 7, 8]); + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &miner_sk); + let request = StacksHttpRequest::new_post_block_v3(addr.into(), &block); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.block, Some(block.clone())); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, _contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block.is_none()); + + // try to deal with an invalid block + let mut bad_block = block.clone(); + bad_block.txs.clear(); + + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let request = StacksHttpRequest::new_post_block_v3(addr.into(), &bad_block); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad block"); + } + } +} + +#[test] +fn handle_req_accepted() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + let (next_block, ..) 
= rpc_test.peer_1.single_block_tenure( + &rpc_test.privk1, + |_| {}, + |burn_ops| { + rpc_test.peer_2.next_burnchain_block(burn_ops.clone()); + }, + |_| true, + ); + let next_block_id = next_block.block_id(); + let mut requests = vec![]; + + // post the block + requests.push(StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )); + + // idempotent + requests.push(StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, next_block_id); + + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, next_block_id); +} + +#[test] +fn handle_req_unknown_burn_block() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + // test with a consensus hash not known yet to the peer + let (next_block, ..) = + rpc_test + .peer_1 + .single_block_tenure(&rpc_test.privk1, |_| {}, |_| {}, |_| true); + let next_block_id = next_block.block_id(); + let requests = vec![StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )]; + + let mut responses = rpc_test.run(requests); + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 1f528c57c56..69540248441 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -20,7 +20,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName, Value}; -use stacks_common::codec::{read_next, Error as CodecError, StacksMessageCodec}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, }; @@ -174,6 +174,7 @@ fn test_stream_mempool_txs() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 784bff9639a..f971344a28a 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -38,9 +38,10 @@ use std::fs; use clarity::vm::types::QualifiedContractIdentifier; use rusqlite::types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::StacksBlockId; +use 
stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util; use stacks_common::util::hash::{bin_bytes, hex_bytes, to_bin, to_hex, Hash160}; use stacks_common::util::log; @@ -206,11 +207,9 @@ impl AtlasDB { /// Get the database schema version, given a DB connection fn get_schema_version(conn: &Connection) -> Result<u32, db_error> { - let version = conn.query_row( - "SELECT MAX(version) from db_config", - rusqlite::NO_PARAMS, - |row| row.get(0), - )?; + let version = conn.query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { + row.get(0) + })?; Ok(version) } @@ -228,7 +227,7 @@ impl AtlasDB { tx.execute( "INSERT INTO db_config (version) VALUES (?1)", - &[&ATLASDB_VERSION], + params![ATLASDB_VERSION], )?; if let Some(attachments) = genesis_attachments { @@ -236,10 +235,10 @@ impl AtlasDB { for attachment in attachments { tx.execute( "INSERT INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - &[ - &attachment.hash() as &dyn ToSql, - &attachment.content as &dyn ToSql, - &now as &dyn ToSql, + params![ + attachment.hash(), + attachment.content, + now, ], ) .map_err(db_error::SqliteError)?; @@ -348,7 +347,7 @@ impl AtlasDB { db_conn.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["2"], + params!["2"], )?; Ok(()) @@ -406,17 +405,17 @@ impl AtlasDB { tx.execute_batch(row_text)?; } - tx.execute("INSERT INTO db_config (version) VALUES (?1)", &["1"])?; + tx.execute("INSERT INTO db_config (version) VALUES (?1)", params!["1"])?; if let Some(attachments) = genesis_attachments { let now = util::get_epoch_time_secs() as i64; for attachment in attachments { tx.execute( "INSERT INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - rusqlite::params![ - &attachment.hash(), - &attachment.content, - &now, + params![ + attachment.hash(), + attachment.content, + now, ], )?; } @@ -462,9 +461,9 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = (page_index + 1) * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT MIN(block_height) as min, MAX(block_height) as max FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2"; - let args = [&min as &dyn ToSql, &max as &dyn ToSql]; + let args = params![min, max]; let mut stmt = self.conn.prepare(&qry)?; - let mut rows = stmt.query(&args)?; + let mut rows = stmt.query(args)?; match rows.next() { Ok(Some(row)) => { @@ -498,12 +497,8 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; - let args = [ - &min as &dyn ToSql, - &max as &dyn ToSql, - block_id as &dyn ToSql, - ]; - let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, &args)?; + let args = params![min, max, block_id,]; + let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize]; for (attachment_index, is_available) in rows.into_iter() { @@ -529,10 +524,10 @@ impl AtlasDB { let now = util::get_epoch_time_secs() as i64; let res = tx.execute( "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 0, ?)", - &[ - &attachment.hash() as &dyn ToSql, - &attachment.content as &dyn ToSql, - &now as &dyn ToSql, + params![ + 
attachment.hash(), + attachment.content, + now, ], ); res.map_err(db_error::SqliteError)?; @@ -544,7 +539,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachments WHERE hash IN (SELECT hash FROM attachments WHERE was_instantiated = 0 ORDER BY created_at ASC LIMIT ?)", - &[&k as &dyn ToSql], + params![k], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -557,7 +552,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachments WHERE was_instantiated = 0 AND created_at < ?", - &[&cut_off as &dyn ToSql], + params![cut_off], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -586,11 +581,11 @@ impl AtlasDB { let tx = self.tx_begin()?; tx.execute( "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - rusqlite::params![&attachment.hash(), &attachment.content, &now], + params![attachment.hash(), attachment.content, now], )?; tx.execute( "UPDATE attachment_instances SET is_available = 1 WHERE content_hash = ?1 AND status = ?2", - rusqlite::params![&attachment.hash(), &AttachmentInstanceStatus::Checked], + params![attachment.hash(), AttachmentInstanceStatus::Checked], )?; tx.commit()?; Ok(()) @@ -603,8 +598,8 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 0" .to_string(); - let args = [&hex_content_hash as &dyn ToSql]; - let row = query_row::<Attachment, _>(&self.conn, &qry, &args)?; + let args = params![hex_content_hash]; + let row = query_row::<Attachment, _>(&self.conn, &qry, args)?; Ok(row) } @@ -617,7 +612,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachment_instances WHERE is_available = 0 AND created_at < ?", - &[&cut_off as &dyn ToSql], + params![cut_off], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -628,7 +623,7 @@ impl AtlasDB { &mut self, ) -> Result<Vec<AttachmentInstance>, db_error> { let qry = "SELECT * FROM attachment_instances WHERE is_available = 0 AND status = ?"; - let rows = query_rows(&self.conn, qry, &[&AttachmentInstanceStatus::Checked])?; + let rows = query_rows(&self.conn, qry, params![AttachmentInstanceStatus::Checked])?; Ok(rows) } @@ -638,7 +633,7 @@ impl AtlasDB { ) -> Result<Vec<AttachmentInstance>, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT * FROM attachment_instances WHERE content_hash = ?1 AND status = ?2"; - let args = rusqlite::params![&hex_content_hash, &AttachmentInstanceStatus::Checked]; + let args = params![hex_content_hash, AttachmentInstanceStatus::Checked]; let rows = query_rows(&self.conn, qry, args)?; Ok(rows) } @@ -647,8 +642,8 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); - let args = [&hex_content_hash as &dyn ToSql]; - let row = query_row::<Attachment, _>(&self.conn, &qry, &args)?; + let args = params![hex_content_hash]; + let row = query_row::<Attachment, _>(&self.conn, &qry, args)?; Ok(row) } @@ -681,7 +676,7 @@ impl AtlasDB { query_rows( &self.conn, "SELECT * FROM attachment_instances WHERE status = ?1 LIMIT ?2", - rusqlite::params![&AttachmentInstanceStatus::Queued, MAX_PROCESS_PER_ROUND], + params![AttachmentInstanceStatus::Queued, MAX_PROCESS_PER_ROUND], ) } @@ -694,12 +689,12 @@ impl AtlasDB { self.conn.execute( "UPDATE attachment_instances SET status = ?1, is_available = ?2 WHERE index_block_hash = ?3 
AND contract_id = ?4 AND attachment_index = ?5", - rusqlite::params![ - &AttachmentInstanceStatus::Checked, - &is_available, - &attachment.index_block_hash, - &attachment.contract_id.to_string(), - &attachment.attachment_index, + params![ + AttachmentInstanceStatus::Checked, + is_available, + attachment.index_block_hash, + attachment.contract_id.to_string(), + attachment.attachment_index, ], )?; Ok(()) @@ -720,17 +715,17 @@ impl AtlasDB { attachment_index, block_height, is_available, metadata, contract_id, tx_id, status) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", - rusqlite::params![ - &attachment.content_hash, - &now, - &attachment.index_block_hash, - &attachment.attachment_index, - &u64_to_sql(attachment.stacks_block_height)?, - &is_available, - &attachment.metadata, - &attachment.contract_id.to_string(), - &attachment.tx_id, - &status + params![ + attachment.content_hash, + now, + attachment.index_block_hash, + attachment.attachment_index, + u64_to_sql(attachment.stacks_block_height)?, + is_available, + attachment.metadata, + attachment.contract_id.to_string(), + attachment.tx_id, + status ], )?; sql_tx.commit()?; diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 2ebcb71316e..8094c77799f 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -18,6 +18,7 @@ use std::collections::{BinaryHeap, HashMap, HashSet}; use std::{thread, time}; use clarity::vm::types::QualifiedContractIdentifier; +use rusqlite::params; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::Hash160; @@ -832,16 +833,16 @@ fn schema_2_migration() { attachment_index, block_height, is_available, metadata, contract_id, tx_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", - rusqlite::params![ - &attachment.content_hash, - &0, - &attachment.index_block_hash, - &attachment.attachment_index, - &u64_to_sql(attachment.stacks_block_height).unwrap(), - &true, - &attachment.metadata, - &attachment.contract_id.to_string(), - &attachment.tx_id, + params![ + attachment.content_hash, + 0, + attachment.index_block_hash, + attachment.attachment_index, + u64_to_sql(attachment.stacks_block_height).unwrap(), + true, + attachment.metadata, + attachment.contract_id.to_string(), + attachment.tx_id, ], ) .unwrap(); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 52622d1e597..1e1fa79f5c9 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -128,6 +128,8 @@ pub struct NeighborStats { pub transaction_push_rx_counts: VecDeque<(u64, u64)>, /// (timestamp, num bytes) pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub nakamoto_block_push_rx_counts: VecDeque<(u64, u64)>, pub relayed_messages: HashMap<NeighborAddress, RelayStats>, } @@ -152,6 +154,7 @@ impl NeighborStats { microblocks_push_rx_counts: VecDeque::new(), transaction_push_rx_counts: VecDeque::new(), stackerdb_push_rx_counts: VecDeque::new(), + nakamoto_block_push_rx_counts: VecDeque::new(), relayed_messages: HashMap::new(), } } @@ -214,6 +217,17 @@ impl NeighborStats { } } + /// Record that we recently received a Nakamoto block push of the given size.
+ /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current + /// bandwidth consumed by Nakamoto block pushes + pub fn add_nakamoto_block_push(&mut self, message_size: u64) -> () { + self.nakamoto_block_push_rx_counts + .push_back((get_epoch_time_secs(), message_size)); + while self.nakamoto_block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { + self.nakamoto_block_push_rx_counts.pop_front(); + } + } + pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) -> () { if let Some(stats) = self.relayed_messages.get_mut(addr) { stats.num_messages += 1; @@ -298,6 +312,14 @@ impl NeighborStats { NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } + /// Get a peer's total nakamoto block bandwidth usage + pub fn get_nakamoto_block_push_bandwidth(&self) -> f64 { + NeighborStats::get_bandwidth( + &self.nakamoto_block_push_rx_counts, + BANDWIDTH_POINT_LIFETIME, + ) + } + /// Determine how many of a particular message this peer has received pub fn get_message_recv_count(&self, msg_id: StacksMessageID) -> u64 { *(self.msg_rx_counts.get(&msg_id).unwrap_or(&0)) @@ -1628,7 +1650,7 @@ impl ConversationP2P { .map_err(|e| net_error::from(e))?; if cfg!(test) { - // make *sure* the behavior stays the same + // make *sure* the behavior stays the same in epoch 2 let original_blocks_inv_data: BlocksInvData = chainstate.get_blocks_inventory(&block_hashes)?; @@ -1726,6 +1748,7 @@ impl ConversationP2P { &tip, sortdb, chainstate, + &network.stacks_tip.block_id(), reward_cycle, )?; let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { @@ -1736,6 +1759,13 @@ impl ConversationP2P { e })?; + test_debug!( + "Reply NakamotoInv for {} (rc {}): {:?}", + &get_nakamoto_inv.consensus_hash, + reward_cycle, + &nakamoto_inv + ); + Ok(StacksMessageType::NakamotoInv(nakamoto_inv)) } @@ -2217,6 +2247,45 @@ impl ConversationP2P { Ok(None) } + /// Validate a pushed Nakamoto block list. + /// Update bandwidth accounting, but forward the blocks along if we can accept them. + /// Possibly return a reply handle for a NACK if we throttle the remote sender + fn validate_nakamoto_block_push( + &mut self, + network: &PeerNetwork, + preamble: &Preamble, + relayers: Vec<RelayData>, + ) -> Result<Option<ReplyHandleP2P>, net_error> { + assert!(preamble.payload_len > 1); // don't count 1-byte type prefix + + let local_peer = network.get_local_peer(); + let chain_view = network.get_chain_view(); + + if !self.process_relayers(local_peer, preamble, &relayers) { + warn!( + "Drop pushed Nakamoto blocks -- invalid relayers {:?}", + &relayers + ); + self.stats.msgs_err += 1; + return Err(net_error::InvalidMessage); + } + + self.stats + .add_nakamoto_block_push((preamble.payload_len as u64) - 1); + + if self.connection.options.max_nakamoto_block_push_bandwidth > 0 + && self.stats.get_nakamoto_block_push_bandwidth() + > (self.connection.options.max_nakamoto_block_push_bandwidth as f64) + { + debug!("Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); + return self + .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) + .and_then(|handle| Ok(Some(handle))); + } + + Ok(None) + } + /// Handle an inbound authenticated p2p data-plane message.
/// Return the message if not handled fn handle_data_message( @@ -2305,6 +2374,21 @@ impl ConversationP2P { } } } + StacksMessageType::NakamotoBlocks(_) => { + // not handled here, but do some accounting -- we can't receive too many + // Nakamoto blocks per second + match self.validate_nakamoto_block_push( + network, + &msg.preamble, + msg.relayers.clone(), + )? { + Some(handle) => Ok(handle), + None => { + // will forward upstream + return Ok(Some(msg)); + } + } + } _ => { // all else will forward upstream return Ok(Some(msg)); @@ -6603,6 +6687,54 @@ mod test { assert_eq!(bw_stats.get_stackerdb_push_bandwidth(), 110.0); } + #[test] + fn test_neighbor_stats_nakamoto_block_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + stats.add_nakamoto_block_push(100); + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_nakamoto_block_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_nakamoto_block_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_nakamoto_block_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // ~110 bytes/sec: 1100 bytes over the 10 seconds between the first and last push + assert_eq!(bw_stats.get_nakamoto_block_push_bandwidth(), 110.0); + } + #[test] fn test_sign_relay_forward_message() { let conn_opts = ConnectionOptions::default(); diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index c0496aa14c7..bd8154e414b 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -41,6 +41,7 @@ use stacks_common::util::secp256k1::{ use crate::burnchains::{BurnchainView, PrivateKey, PublicKey}; use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksPublicKey, StacksTransaction, MAX_BLOCK_LEN, }; @@ -353,6 +354,37 @@ impl NakamotoInvData { } } +impl StacksMessageCodec for NakamotoBlocksData { + #[cfg_attr(test, mutants::skip)] + fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.blocks)?; + Ok(()) + } + + fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<NakamotoBlocksData, codec_error> { + let blocks: Vec<NakamotoBlock> = { + // loose upper-bound + let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + read_next_at_most::<_, NakamotoBlock>(&mut bound_read, NAKAMOTO_BLOCKS_PUSHED_MAX) + }?; + + // only valid if there are no dups + let mut present = HashSet::new(); + for block in blocks.iter() { + if present.contains(&block.block_id()) { + // no dups allowed + return Err(codec_error::DeserializeError( + "Invalid NakamotoBlocksData: duplicate block".to_string(), + )); + } + + present.insert(block.block_id()); + } + + Ok(NakamotoBlocksData { blocks }) + } +} + impl StacksMessageCodec for GetPoxInv { fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.consensus_hash)?; @@ -930,6 +962,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => StacksMessageID::StackerDBPushChunk,
StacksMessageType::GetNakamotoInv(ref _m) => StacksMessageID::GetNakamotoInv, StacksMessageType::NakamotoInv(ref _m) => StacksMessageID::NakamotoInv, + StacksMessageType::NakamotoBlocks(ref _m) => StacksMessageID::NakamotoBlocks, } } @@ -964,6 +997,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => "StackerDBPushChunk", StacksMessageType::GetNakamotoInv(ref _m) => "GetNakamotoInv", StacksMessageType::NakamotoInv(ref _m) => "NakamotoInv", + StacksMessageType::NakamotoBlocks(ref _m) => "NakamotoBlocks", } } @@ -1071,6 +1105,15 @@ impl StacksMessageType { StacksMessageType::NakamotoInv(ref m) => { format!("NakamotoInv({:?})", &m.tenures) } + StacksMessageType::NakamotoBlocks(ref m) => { + format!( + "NakamotoBlocks({:?})", + m.blocks + .iter() + .map(|block| block.block_id()) + .collect::<Vec<_>>() + ) + } } } } @@ -1122,6 +1165,7 @@ impl StacksMessageCodec for StacksMessageID { } x if x == StacksMessageID::GetNakamotoInv as u8 => StacksMessageID::GetNakamotoInv, x if x == StacksMessageID::NakamotoInv as u8 => StacksMessageID::NakamotoInv, + x if x == StacksMessageID::NakamotoBlocks as u8 => StacksMessageID::NakamotoBlocks, _ => { return Err(codec_error::DeserializeError( "Unknown message ID".to_string(), @@ -1166,6 +1210,7 @@ impl StacksMessageCodec for StacksMessageType { StacksMessageType::StackerDBPushChunk(ref m) => write_next(fd, m)?, StacksMessageType::GetNakamotoInv(ref m) => write_next(fd, m)?, StacksMessageType::NakamotoInv(ref m) => write_next(fd, m)?, + StacksMessageType::NakamotoBlocks(ref m) => write_next(fd, m)?, } Ok(()) } @@ -1276,6 +1321,10 @@ impl StacksMessageCodec for StacksMessageType { let m: NakamotoInvData = read_next(fd)?; StacksMessageType::NakamotoInv(m) } + StacksMessageID::NakamotoBlocks => { + let m: NakamotoBlocksData = read_next(fd)?; + StacksMessageType::NakamotoBlocks(m) + } StacksMessageID::Reserved => { return Err(codec_error::DeserializeError( "Unsupported message ID 'reserved'".to_string(), diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 878ab04efb8..8dc5ad77946 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -368,6 +368,7 @@ pub struct ConnectionOptions { pub max_microblocks_push_bandwidth: u64, pub max_transaction_push_bandwidth: u64, pub max_stackerdb_push_bandwidth: u64, + pub max_nakamoto_block_push_bandwidth: u64, pub max_sockets: usize, pub public_ip_address: Option<(PeerAddress, u16)>, pub public_ip_request_timeout: u64, @@ -381,6 +382,7 @@ pub struct ConnectionOptions { pub max_buffered_microblocks_available: u64, pub max_buffered_blocks: u64, pub max_buffered_microblocks: u64, + pub max_buffered_nakamoto_blocks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query @@ -393,30 +395,55 @@ pub struct ConnectionOptions { pub socket_send_buffer_size: u32, /// whether or not to announce or accept neighbors that are behind private networks pub private_neighbors: bool, + /// maximum number of confirmations for a nakamoto block's sortition for which it will be + /// pushed + pub max_nakamoto_block_relay_age: u64, + /// The authorization token to enable the block proposal RPC endpoint + pub block_proposal_token: Option<String>, // fault injection + /// Disable neighbor walk and discovery pub disable_neighbor_walk: bool, + /// Disable sharing neighbors to a remote requester pub disable_chat_neighbors: bool, + /// Disable block inventory sync state machine pub 
diff --git a/stackslib/src/net/connection.rs index 878ab04efb8..8dc5ad77946 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs
@@ -368,6 +368,7 @@ pub struct ConnectionOptions { pub max_microblocks_push_bandwidth: u64, pub max_transaction_push_bandwidth: u64, pub max_stackerdb_push_bandwidth: u64, + pub max_nakamoto_block_push_bandwidth: u64, pub max_sockets: usize, pub public_ip_address: Option<(PeerAddress, u16)>, pub public_ip_request_timeout: u64,
@@ -381,6 +382,7 @@ pub struct ConnectionOptions { pub max_buffered_microblocks_available: u64, pub max_buffered_blocks: u64, pub max_buffered_microblocks: u64, + pub max_buffered_nakamoto_blocks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query
@@ -393,30 +395,55 @@ pub socket_send_buffer_size: u32, /// whether or not to announce or accept neighbors that are behind private networks pub private_neighbors: bool, + /// maximum number of confirmations for a nakamoto block's sortition for which it will be + /// pushed + pub max_nakamoto_block_relay_age: u64, + /// The authorization token to enable the block proposal RPC endpoint + pub block_proposal_token: Option<String>, // fault injection + /// Disable neighbor walk and discovery pub disable_neighbor_walk: bool, + /// Disable sharing neighbors to a remote requester pub disable_chat_neighbors: bool, + /// Disable block inventory sync state machine pub disable_inv_sync: bool, + /// Disable sending inventory messages to a remote requester pub disable_inv_chat: bool, + /// Disable block download state machine pub disable_block_download: bool, + /// Disable network pruning pub disable_network_prune: bool, + /// Disable banning misbehaving peers pub disable_network_bans: bool, + /// Disable block availability advertisement pub disable_block_advertisement: bool, + /// Disable block pushing pub disable_block_push: bool, + /// Disable microblock pushing pub disable_microblock_push: bool, + /// Disable walk pingbacks -- don't attempt to walk to a remote peer even if it contacted us + /// first pub disable_pingbacks: bool, + /// Disable walking to inbound neighbors pub disable_inbound_walks: bool, + /// Disable all attempts to learn our IP address pub disable_natpunch: bool, + /// Disable handshakes from inbound neighbors pub disable_inbound_handshakes: bool, + /// Disable getting chunks from StackerDB (e.g. to test push-only) pub disable_stackerdb_get_chunks: bool, + /// Unconditionally disconnect a peer after this amount of time pub force_disconnect_interval: Option<u64>, /// If set to true, this forces the p2p state machine to believe that it is running in /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. pub force_nakamoto_epoch_transition: bool, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option<String>, + + // test facilitation + /// Do not require that an unsolicited message originate from an authenticated, connected + /// neighbor + pub test_disable_unsolicited_message_authentication: bool, }
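Per the doc comment on max_nakamoto_block_relay_age above, a pushed Nakamoto block is only relayed while its sortition is recent; a minimal sketch of that freshness rule (the helper name and exact comparison are assumptions for illustration, not the node's implementation):

    fn block_is_fresh(burn_tip_height: u64, block_sortition_height: u64, opts: &ConnectionOptions) -> bool {
        // drop pushed blocks whose sortition is too deeply confirmed
        burn_tip_height.saturating_sub(block_sortition_height) <= opts.max_nakamoto_block_relay_age
    }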
impl std::default::Default for ConnectionOptions {
@@ -472,6 +499,7 @@ impl std::default::Default for ConnectionOptions { max_microblocks_push_bandwidth: 0, // infinite upload bandwidth allowed max_transaction_push_bandwidth: 0, // infinite upload bandwidth allowed max_stackerdb_push_bandwidth: 0, // infinite upload bandwidth allowed + max_nakamoto_block_push_bandwidth: 0, // infinite upload bandwidth allowed max_sockets: 800, // maximum number of client sockets we'll ever register public_ip_address: None, // resolve it at runtime by default public_ip_request_timeout: 60, // how often we can attempt to look up our public IP address
@@ -479,18 +507,21 @@ impl std::default::Default for ConnectionOptions { public_ip_max_retries: 3, // maximum number of retries before self-throttling for $public_ip_timeout max_block_push: 10, // maximum number of blocksData messages to push out via our anti-entropy protocol max_microblock_push: 10, // maximum number of microblocks messages to push out via our anti-entropy protocol - antientropy_retry: 60, // retry pushing data once every minute + antientropy_retry: 3600, // retry pushing data once every hour antientropy_public: true, // run antientropy even if we're NOT NAT'ed - max_buffered_blocks_available: 1, - max_buffered_microblocks_available: 1, - max_buffered_blocks: 1, - max_buffered_microblocks: 10, + max_buffered_blocks_available: 5, + max_buffered_microblocks_available: 5, + max_buffered_blocks: 5, + max_buffered_microblocks: 1024, + max_buffered_nakamoto_blocks: 1024, mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) socket_recv_buffer_size: 131072, // Linux default socket_send_buffer_size: 16384, // Linux default private_neighbors: true, + max_nakamoto_block_relay_age: 6, + block_proposal_token: None, // no faults on by default disable_neighbor_walk: false,
@@ -510,7 +541,9 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_get_chunks: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, - block_proposal_token: None, + + // no test facilitations on by default + test_disable_unsolicited_message_authentication: false, } } }
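Operators can override the new defaults in the usual way; a small sketch (the values are illustrative, the fields are the ones added above):

    let mut opts = ConnectionOptions::default();
    opts.max_nakamoto_block_push_bandwidth = 1_048_576; // cap pushes at ~1 MB/s instead of unlimited
    opts.max_buffered_nakamoto_blocks = 256; // buffer fewer unprocessed pushed blocks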
.unwrap_or("1".to_string()); Ok(version) @@ -557,13 +556,13 @@ impl PeerDB { p2p_port: u16, stacker_dbs: &[QualifiedContractIdentifier], ) -> Result<(), db_error> { - let local_peer_args: &[&dyn ToSql] = &[ - &p2p_port, - &data_url.as_str(), - &serde_json::to_string(stacker_dbs) + let local_peer_args = params![ + p2p_port, + data_url.as_str(), + serde_json::to_string(stacker_dbs) .expect("FATAL: unable to serialize Vec"), - &network_id, - &parent_network_id, + network_id, + parent_network_id, ]; match self.conn.execute("UPDATE local_peer SET port = ?1, data_url = ?2, stacker_dbs = ?3 WHERE network_id = ?4 AND parent_network_id = ?5", @@ -819,7 +818,7 @@ impl PeerDB { ) -> Result<(), db_error> { tx.execute( "UPDATE local_peer SET addrbytes = ?1, port = ?2", - &[&to_bin(addrbytes.as_bytes().as_ref()), &port as &dyn ToSql], + params![to_bin(addrbytes.as_bytes()), port], // TODO: double check if delete as_ref here ) .map_err(db_error::SqliteError)?; @@ -828,11 +827,8 @@ impl PeerDB { /// Set local service availability pub fn set_local_services(tx: &Transaction, services: u16) -> Result<(), db_error> { - tx.execute( - "UPDATE local_peer SET services = ?1", - &[&services as &dyn ToSql], - ) - .map_err(db_error::SqliteError)?; + tx.execute("UPDATE local_peer SET services = ?1", params![services]) + .map_err(db_error::SqliteError)?; Ok(()) } @@ -843,7 +839,7 @@ impl PeerDB { privkey: &Secp256k1PrivateKey, expire_block: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&to_hex(&privkey.to_bytes()), &u64_to_sql(expire_block)?]; + let args = params![to_hex(&privkey.to_bytes()), u64_to_sql(expire_block)?]; tx.execute( "UPDATE local_peer SET private_key = ?1, private_key_expire = ?2", args, @@ -916,12 +912,8 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args = [ - &network_id as &dyn ToSql, - &peer_addr.to_bin() as &dyn ToSql, - &peer_port as &dyn ToSql, - ]; - query_row::(conn, qry, &args) + let args = params![network_id, peer_addr.to_bin(), peer_port,]; + query_row::(conn, qry, args) } pub fn has_peer( @@ -931,7 +923,7 @@ impl PeerDB { peer_port: u16, ) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = &[&network_id, &peer_addr.to_bin(), &peer_port]; + let args = params![network_id, peer_addr.to_bin(), peer_port]; Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) @@ -945,8 +937,8 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; - let args = [&network_id as &dyn ToSql, &peer_port as &dyn ToSql]; - query_row::(conn, &qry, &args) + let args = params![network_id, peer_port]; + query_row::(conn, &qry, args) } /// Get a peer record at a particular slot @@ -956,15 +948,15 @@ impl PeerDB { slot: u32, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = [&network_id as &dyn ToSql, &slot as &dyn ToSql]; - query_row::(conn, &qry, &args) + let args = params![network_id, slot]; + query_row::(conn, &qry, args) } /// Is there any peer at a particular slot? pub fn has_peer_at(conn: &DBConn, network_id: u32, slot: u32) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = [&network_id as &dyn ToSql, &slot as &dyn ToSql]; - Ok(query_row::(conn, &qry, &args)? 
+ let args = params![network_id, slot]; + Ok(query_row::<i64, _>(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) }
@@ -1040,7 +1032,7 @@ impl PeerDB { ) -> Result<(), db_error> { for cid in smart_contracts { test_debug!("Add Stacker DB contract to slot {}: {}", slot, cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args = params![cid.to_string(), slot]; tx.execute("INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)", args) .map_err(db_error::SqliteError)?; }
@@ -1062,22 +1054,22 @@ impl PeerDB { ) -> Result<(), db_error> { let old_peer_opt = PeerDB::get_peer_at(tx, neighbor.addr.network_id, slot)?; - let neighbor_args: &[&dyn ToSql] = &[ - &neighbor.addr.peer_version, - &neighbor.addr.network_id, - &to_bin(neighbor.addr.addrbytes.as_bytes()), - &neighbor.addr.port, - &to_hex(&neighbor.public_key.to_bytes_compressed()), - &u64_to_sql(neighbor.expire_block)?, - &u64_to_sql(neighbor.last_contact_time)?, - &neighbor.asn, - &neighbor.org, - &neighbor.allowed, - &neighbor.denied, - &neighbor.in_degree, - &neighbor.out_degree, - &0i64, - &slot, + let neighbor_args = params![ + neighbor.addr.peer_version, + neighbor.addr.network_id, + to_bin(neighbor.addr.addrbytes.as_bytes()), + neighbor.addr.port, + to_hex(&neighbor.public_key.to_bytes_compressed()), + u64_to_sql(neighbor.expire_block)?, + u64_to_sql(neighbor.last_contact_time)?, + neighbor.asn, + neighbor.org, + neighbor.allowed, + neighbor.denied, + neighbor.in_degree, + neighbor.out_degree, + 0i64, + slot, ]; tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot) \
@@ -1108,11 +1100,7 @@ impl PeerDB { let slot_opt = Self::find_peer_slot(tx, network_id, peer_addr, peer_port)?; tx.execute( "DELETE FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[ - &network_id as &dyn ToSql, - &peer_addr.to_bin() as &dyn ToSql, - &peer_port as &dyn ToSql, - ], + params![network_id, peer_addr.to_bin(), peer_port,], ) .map_err(db_error::SqliteError)?;
@@ -1132,7 +1120,7 @@ impl PeerDB { let res: Option<i64> = query_row( conn, "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[&network_id as &dyn ToSql, &peer_addr.to_bin(), &peer_port], + params![network_id, peer_addr.to_bin(), peer_port], )?; match res {
@@ -1142,14 +1130,14 @@ impl PeerDB { } /// Set a peer as an initial peer - fn set_initial_peer( + pub fn set_initial_peer( tx: &Transaction, network_id: u32, peer_addr: &PeerAddress, peer_port: u16, ) -> Result<(), db_error> { tx.execute("UPDATE frontier SET initial = 1 WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[&network_id as &dyn ToSql, &peer_addr.to_bin(), &peer_port]) + params![network_id, peer_addr.to_bin(), peer_port]) .map_err(db_error::SqliteError)?; Ok(())
@@ -1173,7 +1161,7 @@ impl PeerDB { allow_deadline: i64, ) -> Result<(), db_error> { let num_updated = tx.execute("UPDATE frontier SET allowed = ?1 WHERE network_id = ?2 AND addrbytes = ?3 AND port = ?4", - &[&allow_deadline as &dyn ToSql, &network_id, &peer_addr.to_bin(), &peer_port]) + params![allow_deadline, network_id, peer_addr.to_bin(), peer_port]) .map_err(db_error::SqliteError)?; if num_updated == 0 {
@@ -1213,11 +1201,11 @@ impl PeerDB { peer_port: u16, deny_deadline: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &u64_to_sql(deny_deadline)?, - &network_id, - &peer_addr.to_bin(), - &peer_port, + let
args = params![ + u64_to_sql(deny_deadline)?, + network_id, + peer_addr.to_bin(), + peer_port, ]; let num_updated = tx.execute("UPDATE frontier SET denied = ?1 WHERE network_id = ?2 AND addrbytes = ?3 AND port = ?4", args) .map_err(db_error::SqliteError)?;
@@ -1259,20 +1247,20 @@ impl PeerDB { neighbor.addr.port, )?; - let args: &[&dyn ToSql] = &[ - &neighbor.addr.peer_version, - &to_hex(&neighbor.public_key.to_bytes_compressed()), - &u64_to_sql(neighbor.expire_block)?, - &u64_to_sql(neighbor.last_contact_time)?, - &neighbor.asn, - &neighbor.org, - &neighbor.allowed, - &neighbor.denied, - &neighbor.in_degree, - &neighbor.out_degree, - &neighbor.addr.network_id, - &to_bin(neighbor.addr.addrbytes.as_bytes()), - &neighbor.addr.port, + let args = params![ + neighbor.addr.peer_version, + to_hex(&neighbor.public_key.to_bytes_compressed()), + u64_to_sql(neighbor.expire_block)?, + u64_to_sql(neighbor.last_contact_time)?, + neighbor.asn, + neighbor.org, + neighbor.allowed, + neighbor.denied, + neighbor.in_degree, + neighbor.out_degree, + neighbor.addr.network_id, + to_bin(neighbor.addr.addrbytes.as_bytes()), + neighbor.addr.port, ]; tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10 \
@@ -1311,7 +1299,7 @@ impl PeerDB { ) -> Result<Option<u32>, db_error> { let qry = "SELECT slot FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = &[&network_id, &addrbytes.to_bin(), &port]; + let args = params![network_id, addrbytes.to_bin(), port]; Ok(query_row::<u32, _>(conn, qry, args)?) }
@@ -1337,7 +1325,7 @@ impl PeerDB { smart_contract: &QualifiedContractIdentifier, ) -> Result<Vec<u32>, db_error> { let qry = "SELECT peer_slot FROM stackerdb_peers WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; query_rows(conn, qry, args) }
@@ -1397,7 +1385,7 @@ impl PeerDB { let sql = "DELETE FROM stackerdb_peers WHERE smart_contract_id = ?1 AND peer_slot = ?2"; for cid in to_delete.into_iter() { test_debug!("Delete Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; }
@@ -1405,7 +1393,7 @@ impl PeerDB { "INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)"; for cid in to_insert.iter() { test_debug!("Add Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; }
@@ -1462,7 +1450,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&prefix.to_bin(), &mask]; + let args = params![prefix.to_bin(), mask]; tx.execute( &format!( "INSERT OR REPLACE INTO {} (prefix, mask) VALUES (?1, ?2)",
@@ -1481,7 +1469,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&prefix.to_bin(), &mask]; + let args = params![prefix.to_bin(), mask]; tx.execute( &format!("DELETE FROM {} WHERE prefix = ?1 AND mask = ?2", table), args,
@@ -1558,7 +1546,7 @@ impl PeerDB { ) -> Result<(), db_error> { assert!(mask > 0 && mask <= 128); let prefix_txt = PeerDB::cidr_prefix_to_string(prefix, mask); - let args: &[&dyn ToSql] = &[&value, &mask, &prefix_txt]; + let args = 
params![value, mask, prefix_txt]; tx.execute( &format!( "UPDATE frontier SET {} = ?1 WHERE SUBSTR(addrbytes,1,?2) = SUBSTR(?3,1,?2)",
@@ -1636,11 +1624,11 @@ impl PeerDB { if always_include_allowed { // always include allowed neighbors, freshness be damned let allow_qry = "SELECT * FROM frontier WHERE network_id = ?1 AND denied < ?2 AND (allowed < 0 OR ?3 < allowed) AND (peer_version & 0x000000ff) >= ?4"; - let allow_args: &[&dyn ToSql] = &[ - &network_id, - &u64_to_sql(now_secs)?, - &u64_to_sql(now_secs)?, - &network_epoch, + let allow_args = params![ + network_id, + u64_to_sql(now_secs)?, + u64_to_sql(now_secs)?, + network_epoch, ]; let mut allow_rows = query_rows::<Neighbor, _>(conn, &allow_qry, allow_args)?;
@@ -1666,14 +1654,14 @@ impl PeerDB { (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" }; - let random_peers_args: &[&dyn ToSql] = &[ - &network_id, - &u64_to_sql(min_age)?, - &u64_to_sql(block_height)?, - &u64_to_sql(now_secs)?, - &u64_to_sql(now_secs)?, - &network_epoch, - &(count - (ret.len() as u32)), + let random_peers_args = params![ + network_id, + u64_to_sql(min_age)?, + u64_to_sql(block_height)?, + u64_to_sql(now_secs)?, + u64_to_sql(now_secs)?, + network_epoch, + (count - (ret.len() as u32)), ]; let mut random_peers = query_rows::<Neighbor, _>(conn, &random_peers_qry, random_peers_args)?;
@@ -1723,12 +1711,7 @@ impl PeerDB { fn asn4_insert(tx: &Transaction, asn4: &ASEntry4) -> Result<(), db_error> { tx.execute( "INSERT OR REPLACE INTO asn4 (prefix, mask, asn, org) VALUES (?1, ?2, ?3, ?4)", - &[ - &asn4.prefix as &dyn ToSql, - &asn4.mask as &dyn ToSql, - &asn4.asn as &dyn ToSql, - &asn4.org as &dyn ToSql, - ], + params![asn4.prefix, asn4.mask, asn4.asn, asn4.org,], ) .map_err(db_error::SqliteError)?;
@@ -1747,8 +1730,8 @@ impl PeerDB { let addr_u32 = addrbits.ipv4_bits().unwrap(); let qry = "SELECT * FROM asn4 WHERE prefix = (?1 & ~((1 << (32 - mask)) - 1)) ORDER BY prefix DESC LIMIT 1"; - let args = [&addr_u32 as &dyn ToSql]; - let rows = query_rows::<ASEntry4, _>(conn, &qry, &args)?; + let args = params![addr_u32]; + let rows = query_rows::<ASEntry4, _>(conn, &qry, args)?; match rows.len() { 0 => Ok(None), _ => Ok(Some(rows[0].asn)),
@@ -1770,8 +1753,8 @@ impl PeerDB { #[cfg_attr(test, mutants::skip)] pub fn asn_count(conn: &DBConn, asn: u32) -> Result<u64, db_error> { let qry = "SELECT COUNT(*) FROM frontier WHERE asn = ?1"; - let args = [&asn as &dyn ToSql]; - let count = query_count(conn, &qry, &args)?; + let args = params![asn]; + let count = query_count(conn, &qry, args)?; Ok(count as u64) }
@@ -1803,11 +1786,11 @@ impl PeerDB { } let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); - let args: &[&dyn ToSql] = &[ - &smart_contract.to_string(), - &network_id, - &u64_to_sql(min_age)?, - &max_count_u32, + let args = params![ + smart_contract.to_string(), + network_id, + u64_to_sql(min_age)?, + max_count_u32, ]; query_rows(conn, qry, args) }
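For context, rusqlite's params! macro used throughout this migration replaces the old &[&dyn ToSql] slices; a minimal standalone sketch (table and column names are illustrative, not from PeerDB):

    use rusqlite::{params, Connection};

    fn set_port(conn: &Connection, network_id: u32, port: u16) -> rusqlite::Result<usize> {
        // params! builds the parameter list from heterogeneous ToSql values,
        // with no manual `&x as &dyn ToSql` casts
        conn.execute(
            "UPDATE local_peer SET port = ?1 WHERE network_id = ?2",
            params![port, network_id],
        )
    }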
diff --git a/stackslib/src/net/download/epoch2x.rs index f5b4b44a3a6..c57d9d19bc8 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs
@@ -522,7 +522,9 @@ impl BlockDownloader { self.broken_neighbors.push(block_key.neighbor.clone()); } Err(e) => { - info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); }
@@ -626,7 +628,9 @@ impl BlockDownloader { Ok(microblocks) => { if microblocks.len() == 0 { // we wouldn't have asked for a 0-length stream - info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); } else {
@@ -644,7 +648,9 @@ impl BlockDownloader { Err(net_error::NotFoundError) => { // remote peer didn't have the microblock, even though their blockinv said // they did. - info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); + info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash; + "consensus_hash" => %block_key.consensus_hash + ); // the fact that we asked this peer means that its block inv indicated // it was present, so the absence is the mark of a broken peer.
@@ -654,7 +660,9 @@ impl BlockDownloader { // talk to them for a while. } Err(e) => { - info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); }
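The log lines above attach a structured field using the slog-style key/value suffix the stacks logging macros accept: format arguments first, then a semicolon and the key/value pairs. Illustrative form only (variable names hypothetical):

    info!("Error decoding response from remote neighbor {:?}", &neighbor;
        "consensus_hash" => %consensus_hash,
    );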
diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77cf64dba6b..3865e8ee398 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs
@@ -38,9 +38,11 @@ use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
@@ -61,7 +63,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError};
@@ -116,10 +118,12 @@ pub struct NakamotoDownloadStateMachine { tenure_start_blocks: HashMap<StacksBlockId, NakamotoBlock>, /// comms to remote neighbors pub(super) neighbor_rpc: NeighborRPC, + /// Nakamoto chain tip + nakamoto_tip: StacksBlockId, } impl NakamotoDownloadStateMachine { - pub fn new(nakamoto_start_height: u64) -> Self { + pub fn new(nakamoto_start_height: u64, nakamoto_tip: StacksBlockId) -> Self { Self { nakamoto_start_height, reward_cycle: 0, // will be calculated at runtime
@@ -135,6 +139,7 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap::new(), tenure_start_blocks: HashMap::new(), neighbor_rpc: NeighborRPC::new(), + nakamoto_tip, } }
@@ -269,8 +274,7 @@ impl NakamotoDownloadStateMachine { .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc.saturating_add(1)) .saturating_sub(1) - .min(tip.block_height) - .saturating_add(1); + .min(tip.block_height.saturating_add(1)); test_debug!( "Load tip sortitions between {} and {} (loaded_so_far = {})",
@@ -308,6 +312,7 @@ impl NakamotoDownloadStateMachine { nakamoto_start: u64, wanted_tenures: &mut [WantedTenure], chainstate: &StacksChainState, + stacks_tip: &StacksBlockId, ) -> Result<(), NetError> { for wt in wanted_tenures.iter_mut() { test_debug!("update_processed_wanted_tenures: consider {:?}", &wt);
@@ -320,7 +325,8 @@ impl NakamotoDownloadStateMachine { continue; } if NakamotoChainState::has_processed_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), + stacks_tip, &wt.tenure_id_consensus_hash, )? { test_debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash);
@@ -346,6 +352,7 @@ impl NakamotoDownloadStateMachine { self.nakamoto_start_height, prev_wanted_tenures, chainstate, + &self.nakamoto_tip, )?; } test_debug!("update_processed_wanted_tenures: update wanted_tenures");
@@ -353,6 +360,7 @@ impl NakamotoDownloadStateMachine { self.nakamoto_start_height, &mut self.wanted_tenures, chainstate, + &self.nakamoto_tip, ) }
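The tenure-processed check is now fork-aware: it is evaluated against an explicit Stacks tip through the chainstate index connection, exactly as used above:

    let processed = NakamotoChainState::has_processed_nakamoto_tenure(
        &mut chainstate.index_conn(),
        &stacks_tip, // evaluate relative to this fork's tip
        &wt.tenure_id_consensus_hash,
    )?;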
@@ -368,17 +376,33 @@ impl NakamotoDownloadStateMachine { /// Returns Err(..) on DB error. pub(crate) fn load_tenure_start_blocks( wanted_tenures: &[WantedTenure], - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, + tip_block_id: &StacksBlockId, tenure_start_blocks: &mut HashMap<StacksBlockId, NakamotoBlock>, ) -> Result<(), NetError> { for wt in wanted_tenures { - let Some(tenure_start_block) = chainstate - .nakamoto_blocks_db() - .get_nakamoto_tenure_start_block(&wt.tenure_id_consensus_hash)? + let Some(tenure_start_block_header) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + tip_block_id, + &wt.tenure_id_consensus_hash, + )? else { test_debug!("No tenure-start block for {}", &wt.tenure_id_consensus_hash); continue; }; + let Some((tenure_start_block, _)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&tenure_start_block_header.index_block_hash())? + else { + let msg = format!( + "Have header but no block for tenure-start of {} ({})", + &wt.tenure_id_consensus_hash, + &tenure_start_block_header.index_block_hash() + ); + error!("{}", &msg); + return Err(NetError::ChainstateError(msg)); + }; tenure_start_blocks.insert(tenure_start_block.block_id(), tenure_start_block); } Ok(()) }
@@ -387,11 +411,12 @@ impl NakamotoDownloadStateMachine { /// Update our local tenure start block data fn update_tenure_start_blocks( &mut self, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ) -> Result<(), NetError> { Self::load_tenure_start_blocks( &self.wanted_tenures, chainstate, + &self.nakamoto_tip, &mut self.tenure_start_blocks, ) }
@@ -654,7 +679,9 @@ impl NakamotoDownloadStateMachine { // the prev_wanted_rc and at least one in the cur_wanted_rc let mut has_prev_rc_block = false; let mut has_cur_rc_block = false; + let mut available_considered = 0; for (_naddr, available) in tenure_block_ids.iter() { + available_considered += available.len(); for (_ch, tenure_info) in available.iter() { if tenure_info.start_reward_cycle == prev_wanted_rc || tenure_info.end_reward_cycle == prev_wanted_rc
{ has_prev_rc_block = true; } } } - if (prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block) + if available_considered > 0 + && ((prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) + || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block)) { debug!( "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})",
@@ -731,7 +759,7 @@ impl NakamotoDownloadStateMachine { &mut self, network: &PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else {
@@ -861,6 +889,7 @@ impl NakamotoDownloadStateMachine { "Peer {} has no inventory for reward cycle {}", naddr, reward_cycle ); + test_debug!("Peer {} has the following inventory data: {:?}", naddr, inv); continue; }; for (i, wt) in wanted_tenures.iter().enumerate() {
@@ -981,9 +1010,13 @@ impl NakamotoDownloadStateMachine { fn count_available_tenure_neighbors( available: &HashMap<ConsensusHash, Vec<NeighborAddress>>, ) -> usize { - available - .iter() - .fold(0, |count, (_ch, naddrs)| count.saturating_add(naddrs.len())) + let mut neighbors = HashSet::new(); + for (_, naddrs) in available.iter() { + for naddr in naddrs.iter() { + neighbors.insert(naddr); + } + } + neighbors.len() }
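The rewrite above counts distinct peers rather than summing per-tenure list lengths; a small illustration (setup values hypothetical):

    let mut available: HashMap<ConsensusHash, Vec<NeighborAddress>> = HashMap::new();
    available.insert(ch_1, vec![naddr_a.clone(), naddr_b.clone()]);
    available.insert(ch_2, vec![naddr_a.clone()]);
    // the old fold counted naddr_a once per tenure (3 total);
    // the HashSet version counts each distinct neighbor once
    assert_eq!(count_available_tenure_neighbors(&available), 2);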
/// This function examines the contents of `self.wanted_tenures` and
@@ -1152,14 +1185,14 @@ impl NakamotoDownloadStateMachine { fn update_tenure_downloaders( &mut self, count: usize, - agg_public_keys: &BTreeMap<u64, Option<Point>>, + current_reward_sets: &BTreeMap<u64, CurrentRewardSet>, ) { self.tenure_downloads.make_tenure_downloaders( &mut self.tenure_download_schedule, &mut self.available_tenures, &mut self.tenure_block_ids, count, - agg_public_keys, + current_reward_sets, ) }
@@ -1182,7 +1215,6 @@ impl NakamotoDownloadStateMachine { pox_constants: &PoxConstants, first_burn_height: u64, inventory_iter: impl Iterator<Item = &'a NakamotoTenureInv>, - blocks_db: NakamotoStagingBlocksConnRef, ) -> bool { if sort_tip.block_height < burnchain_height { test_debug!(
@@ -1229,26 +1261,16 @@ impl NakamotoDownloadStateMachine { .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash)); if is_available && !wt.processed { + // a tenure is available but not yet processed, so we can't yet transition to + // fetching unconfirmed tenures (we'd have no way to validate them). + test_debug!( + "Tenure {} is available but not yet processed", + &wt.tenure_id_consensus_hash + ); return false; } } - // there are still tenures that have to be processed - if blocks_db - .has_any_unprocessed_nakamoto_block() - .map_err(|e| { - warn!( - "Failed to determine if there are unprocessed Nakamoto blocks: {:?}", - &e - ); - e - }) - .unwrap_or(true) - { - test_debug!("Still have stored but unprocessed Nakamoto blocks"); - return false; - } - true }
@@ -1294,14 +1316,16 @@ impl NakamotoDownloadStateMachine { count: usize, downloaders: &mut HashMap<NeighborAddress, NakamotoUnconfirmedTenureDownloader>, highest_processed_block_id: Option<StacksBlockId>, - ) { - while downloaders.len() < count { - let Some(naddr) = schedule.front() else { - break; - }; + ) -> usize { + let mut added = 0; + schedule.retain(|naddr| { if downloaders.contains_key(naddr) { - continue; + return true; + } + if added >= count { + return true; } + let unconfirmed_tenure_download = NakamotoUnconfirmedTenureDownloader::new( naddr.clone(), highest_processed_block_id.clone(), ); test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); downloaders.insert(naddr.clone(), unconfirmed_tenure_download); - schedule.pop_front(); - } + added += 1; + false + }); + added } /// Update our unconfirmed tenure download state machines
@@ -1360,30 +1386,34 @@ impl NakamotoDownloadStateMachine { sortdb: &SortitionDB, sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - highest_complete_tenure: &WantedTenure, - unconfirmed_tenure: &WantedTenure, ) -> ( HashMap<NeighborAddress, Vec<NakamotoBlock>>, HashMap<NeighborAddress, NakamotoTenureDownloader>, ) { + test_debug!("Run unconfirmed tenure downloaders"); + let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); let mut finished = vec![]; let mut unconfirmed_blocks = HashMap::new(); let mut highest_completed_tenure_downloaders = HashMap::new(); - // find the highest-processed block, and update all ongoing state-machines. - // Then, as faster state-machines linked to more up-to-date peers download newer blocks, - // other state-machines will automatically terminate once they reach the highest block this - // peer has now processed. - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); - let highest_processed_block_height = network.stacks_tip.2; - - for (_, downloader) in downloaders.iter_mut() { - downloader.set_highest_processed_block( - highest_processed_block_id.clone(), - highest_processed_block_height, + if network.stacks_tip.is_nakamoto { + // find the highest-processed block, and update all ongoing state-machines. + // Then, as faster state-machines linked to more up-to-date peers download newer blocks, + // other state-machines will automatically terminate once they reach the highest block this + // peer has now processed.
+ let highest_processed_block_id = StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, ); + let highest_processed_block_height = network.stacks_tip.height; + + for (_, downloader) in downloaders.iter_mut() { + downloader.set_highest_processed_block( + highest_processed_block_id.clone(), + highest_processed_block_height, + ); + } } // send requests @@ -1430,42 +1460,59 @@ impl NakamotoDownloadStateMachine { }; test_debug!("Got response from {}", &naddr); - let Ok(blocks_opt) = downloader.handle_next_download_response( + let blocks_opt = match downloader.handle_next_download_response( response, sortdb, sort_tip, chainstate, - &network.aggregate_public_keys, - ) else { - neighbor_rpc.add_dead(network, &naddr); - continue; + &network.current_reward_sets, + ) { + Ok(blocks_opt) => blocks_opt, + Err(NetError::StaleView) => { + continue; + } + Err(e) => { + debug!("Failed to handle next download response from unconfirmed downloader for {:?} in state {:?}: {:?}", &naddr, &downloader.state, &e); + neighbor_rpc.add_dead(network, &naddr); + continue; + } }; let Some(blocks) = blocks_opt else { continue; }; - if let Some(highest_complete_tenure_downloader) = downloader - .make_highest_complete_tenure_downloader( - highest_complete_tenure, - unconfirmed_tenure, - ) - .map_err(|e| { - warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e - }) - .ok() + if downloader + .can_make_highest_complete_tenure_downloader(sortdb) + .unwrap_or(false) { - // don't start this unless the downloader is actually done (this should always be - // the case, but don't tempt fate with an assert!) - if downloader.is_done() { - highest_completed_tenure_downloaders - .insert(naddr.clone(), highest_complete_tenure_downloader); + if let Some(highest_complete_tenure_downloader) = downloader + .make_highest_complete_tenure_downloader() + .map_err(|e| { + warn!( + "Failed to make highest complete tenure downloader for {:?}: {:?}", + &downloader.unconfirmed_tenure_id(), + &e + ); + e + }) + .ok() + { + // don't start this unless the downloader is actually done (this should always be + // the case, but don't tempt fate with an assert!) 
+ if downloader.is_done() { + test_debug!( + "Will fetch the highest complete tenure from {:?}", + &downloader.unconfirmed_tenure_id() + ); + highest_completed_tenure_downloaders + .insert(naddr.clone(), highest_complete_tenure_downloader); + } } + } else { + test_debug!( + "Will not make highest-complete tenure downloader (not a Nakamoto tenure)" + ); } unconfirmed_blocks.insert(naddr.clone(), blocks); @@ -1500,7 +1547,7 @@ impl NakamotoDownloadStateMachine { max_count: usize, ) -> HashMap> { // queue up more downloaders - self.update_tenure_downloaders(max_count, &network.aggregate_public_keys); + self.update_tenure_downloaders(max_count, &network.current_reward_sets); // run all downloaders let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); @@ -1542,71 +1589,6 @@ impl NakamotoDownloadStateMachine { // run all unconfirmed downloaders, and start confirmed downloaders for the // highest complete tenure let burnchain_tip = network.burnchain_tip.clone(); - let Some(unconfirmed_tenure) = self - .wanted_tenures - .last() - .map(|wt| Some(wt.clone())) - .unwrap_or_else(|| { - // unconfirmed tenure is the last tenure in prev_wanted_tenures if - // wanted_tenures.len() is 0 - let prev_wanted_tenures = self.prev_wanted_tenures.as_ref()?; - let wt = prev_wanted_tenures.last()?; - Some(wt.clone()) - }) - else { - // not initialized yet (technically unrachable) - return HashMap::new(); - }; - - // Get the highest WantedTenure. This will be the WantedTenure whose winning block hash is - // the start block hash of the highest complete tenure, and whose consensus hash is the - // tenure ID of the ongoing tenure. It corresponds to the highest sortition for which - // there exists a tenure. - // - // There are three possibilities for obtaining this, based on what we know about tenures - // from the sortition DB and the peers' inventories: - // - // Case 1: There are no sortitions yet in the current reward cycle, so this is the - // second-to-last WantedTenure in the last reward cycle's WantedTenure list. - // - // Case 2: There is one sortition in the current reward cycle, so this is the last - // WantedTenure in the last reward cycle's WantedTenure list - // - // Case 3: There are two or more sortitions in the current reward cycle, so this is the - // second-to-last WantedTenure in the current reward cycle's WantedTenure list. 
- let highest_wanted_tenure = if self.wanted_tenures.is_empty() { - // highest complete wanted tenure is the second-to-last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - // not initialized yet (technically unrachable) - return HashMap::new(); - }; - if prev_wanted_tenures.len() < 2 { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.get(prev_wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - } else if self.wanted_tenures.len() == 1 { - // highest complete tenure is the last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.last() else { - return HashMap::new(); - }; - wt.clone() - } else { - // highest complete tenure is the second-to-last tenure in wanted_tenures - let Some(wt) = self - .wanted_tenures - .get(self.wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - }; // Run the confirmed downloader state machine set, since we could already be processing the // highest complete tenure download. NOTE: due to the way that we call this method, we're @@ -1634,8 +1616,6 @@ impl NakamotoDownloadStateMachine { sortdb, &burnchain_tip, chainstate, - &highest_wanted_tenure, - &unconfirmed_tenure, ) }; @@ -1700,9 +1680,10 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; test_debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}", + "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", burnchain_height, - network.burnchain_tip.block_height + network.burnchain_tip.block_height, + &self.state ); self.update_available_tenures( &invs.inventories, @@ -1727,10 +1708,6 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; - debug!( - "tenure_downloads.is_empty: {}", - self.tenure_downloads.is_empty() - ); if self.tenure_downloads.is_empty() && Self::need_unconfirmed_tenures( self.nakamoto_start_height, @@ -1743,7 +1720,6 @@ impl NakamotoDownloadStateMachine { &sortdb.pox_constants, sortdb.first_block_height, invs.inventories.values(), - chainstate.nakamoto_blocks_db(), ) { debug!( @@ -1763,14 +1739,20 @@ impl NakamotoDownloadStateMachine { return new_blocks; } NakamotoDownloadState::Unconfirmed => { - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); + let highest_processed_block_id = StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ); let new_blocks = self.download_unconfirmed_tenures( network, sortdb, chainstate, - Some(highest_processed_block_id), + if network.stacks_tip.is_nakamoto { + Some(highest_processed_block_id) + } else { + None + }, ); // keep borrow-checker happy by instantiang this ref again, now that `network` is @@ -1781,8 +1763,15 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; - if self.tenure_downloads.is_empty() - && self.unconfirmed_tenure_downloads.is_empty() + if !self.tenure_downloads.is_empty() { + // need to go get this scheduled tenure + debug!( + "Transition from {} to {} -- need confirmed tenure still", + &self.state, + NakamotoDownloadState::Confirmed + ); + self.state = NakamotoDownloadState::Confirmed; + } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { if Self::need_unconfirmed_tenures( @@ -1796,7 +1785,6 @@ impl 
NakamotoDownloadStateMachine { &sortdb.pox_constants, sortdb.first_block_height, invs.inventories.values(), - chainstate.nakamoto_blocks_db(), ) { // do this again self.unconfirmed_tenure_download_schedule =
@@ -1832,9 +1820,11 @@ impl NakamotoDownloadStateMachine { burnchain_height: u64, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result<HashMap<ConsensusHash, Vec<NakamotoBlock>>, NetError> { + self.nakamoto_tip = network.stacks_tip.block_id(); + test_debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); self.update_wanted_tenures(&network, sortdb, chainstate)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd);
diff --git a/stackslib/src/net/download/nakamoto/mod.rs index ddef9796810..dd440ac110f 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs
@@ -183,7 +183,8 @@ impl PeerNetwork { return; } let epoch = self.get_epoch_by_epoch_id(StacksEpochId::Epoch30); - let downloader = NakamotoDownloadStateMachine::new(epoch.start_height); + let downloader = + NakamotoDownloadStateMachine::new(epoch.start_height, self.stacks_tip.block_id()); self.block_downloader_nakamoto = Some(downloader); }
@@ -192,7 +193,7 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result<HashMap<ConsensusHash, Vec<NakamotoBlock>>, NetError> { if self.block_downloader_nakamoto.is_none() {
@@ -214,9 +215,13 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result<HashMap<ConsensusHash, Vec<NakamotoBlock>>, NetError> { + if self.connection_opts.disable_block_download { + return Ok(HashMap::new()); + } + let res = self.sync_blocks_nakamoto(burnchain_height, sortdb, chainstate, ibd)?; let Some(mut block_downloader) = self.block_downloader_nakamoto.take() else {
diff --git a/stackslib/src/net/download/nakamoto/tenure.rs index 53563ab3345..5e2e06c41a9 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs
@@ -177,9 +177,6 @@ impl TenureStartEnd { let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { test_debug!("i={} bit not set", i); - /* - i += 1; - */ continue; }
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c5ea7ba3450..c6e5ee07038 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
@@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{
@@ -41,6 +40,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error,
StacksBlockHeader, TenureChangePayload, @@ -57,7 +57,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -129,8 +129,8 @@ impl fmt::Display for NakamotoTenureDownloadState { /// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); /// in this case, the end-block is the start-block of the ongoing tenure. /// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the aggregate -/// public key for this tenure; their hash-chain continuity will be validated against the start +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start /// and end block hashes; their quantity will be validated against the tenure-change transaction /// in the end-block. /// @@ -149,10 +149,10 @@ pub struct NakamotoTenureDownloader { pub tenure_end_block_id: StacksBlockId, /// Address of who we're asking for blocks pub naddr: NeighborAddress, - /// Aggregate public key that signed the start-block of this tenure - pub start_aggregate_public_key: Point, - /// Aggregate public key that signed the end-block of this tenure - pub end_aggregate_public_key: Point, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with /// this state machine. 
pub idle: bool, @@ -178,21 +178,23 @@ impl NakamotoTenureDownloader { tenure_start_block_id: StacksBlockId, tenure_end_block_id: StacksBlockId, naddr: NeighborAddress, - start_aggregate_public_key: Point, - end_aggregate_public_key: Point, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, ) -> Self { test_debug!( - "Instantiate downloader to {} for tenure {}", + "Instantiate downloader to {} for tenure {}: {}-{}", &naddr, - &tenure_id_consensus_hash + &tenure_id_consensus_hash, + &tenure_start_block_id, + &tenure_end_block_id, ); Self { tenure_id_consensus_hash, tenure_start_block_id, tenure_end_block_id, naddr, - start_aggregate_public_key, - end_aggregate_public_key, + start_signer_keys, + end_signer_keys, idle: false, state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), tenure_start_block: None, @@ -243,16 +245,16 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_start_block + if let Err(e) = tenure_start_block .header - .verify_signer(&self.start_aggregate_public_key) + .verify_signer_signatures(&self.start_signer_keys) { // signature verification failed warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); return Err(NetError::InvalidMessage); } @@ -369,16 +371,16 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_end_block + if let Err(e) = tenure_end_block .header - .verify_signer(&self.end_aggregate_public_key) + .verify_signer_signatures(&self.end_signer_keys) { // bad signature warn!("Invalid tenure-end block: bad signer signature"; "tenure_id" => %self.tenure_id_consensus_hash, "block.header.block_id" => %tenure_end_block.header.block_id(), - "end_aggregate_public_key" => %self.end_aggregate_public_key, - "state" => %self.state); + "state" => %self.state, + "error" => %e); return Err(NetError::InvalidMessage); } @@ -470,12 +472,15 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !block.header.verify_signer(&self.start_aggregate_public_key) { + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { warn!("Invalid block: bad signer signature"; "tenure_id" => %self.tenure_id_consensus_hash, "block.header.block_id" => %block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); + "state" => %self.state, + "error" => %e); return Err(NetError::InvalidMessage); } @@ -653,7 +658,10 @@ impl NakamotoTenureDownloader { "Got download response for tenure-start block {}", &_block_id ); - let block = response.decode_nakamoto_block()?; + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; self.try_accept_tenure_start_block(block)?; Ok(None) } @@ -663,7 +671,10 @@ impl NakamotoTenureDownloader { } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { test_debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block()?; + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: 
{:?}", &e); + e + })?; self.try_accept_tenure_end_block(&block)?; Ok(None) } @@ -672,7 +683,10 @@ impl NakamotoTenureDownloader { "Got download response for tenure blocks ending at {}", &_end_block_id ); - let blocks = response.decode_nakamoto_tenure()?; + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; self.try_accept_tenure_blocks(blocks) } NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 357b588e8a4..8a154637cf4 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -31,16 +31,17 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -61,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -192,9 +193,19 @@ impl NakamotoTenureDownloaderSet { .is_some() } - /// Determine if this downloader set is empty -- i.e. there's no in-flight requests. + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. pub fn is_empty(&self) -> bool { - self.inflight() == 0 + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + return false; + } + true } /// Try to resume processing a download state machine with a given peer. 
Since a peer is
@@ -418,7 +429,7 @@ impl NakamotoTenureDownloaderSet { available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>, tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>, count: usize, - agg_public_keys: &BTreeMap<u64, Option<Point>>, + current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>, ) { test_debug!("schedule: {:?}", schedule); test_debug!("available: {:?}", &available);
@@ -431,8 +442,8 @@ impl NakamotoTenureDownloaderSet { self.num_scheduled_downloaders() ); - self.clear_available_peers(); self.clear_finished_downloaders(); + self.clear_available_peers(); self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); while self.inflight() < count { let Some(ch) = schedule.front() else {
@@ -479,19 +490,25 @@ impl NakamotoTenureDownloaderSet { test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; - let Some(Some(start_agg_pubkey)) = agg_public_keys.get(&tenure_info.start_reward_cycle) + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( - "Cannot fetch tenure-start block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, &tenure_info ); schedule.pop_front(); continue; }; - let Some(Some(end_agg_pubkey)) = agg_public_keys.get(&tenure_info.end_reward_cycle) + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( - "Cannot fetch tenure-end block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, &tenure_info ); schedule.pop_front();
@@ -499,12 +516,10 @@ impl NakamotoTenureDownloaderSet { }; test_debug!( - "Download tenure {} (start={}, end={}) with aggregate keys {}, {} (rc {},{})", + "Download tenure {} (start={}, end={}) (rc {},{})", &ch, &tenure_info.start_block_id, &tenure_info.end_block_id, - &start_agg_pubkey, - &end_agg_pubkey, tenure_info.start_reward_cycle, tenure_info.end_reward_cycle );
@@ -513,8 +528,8 @@ impl NakamotoTenureDownloaderSet { tenure_info.start_block_id.clone(), tenure_info.end_block_id.clone(), naddr.clone(), - start_agg_pubkey.clone(), - end_agg_pubkey.clone(), + start_reward_set.clone(), + end_reward_set.clone(), ); test_debug!("Request tenure {} from neighbor {}", ch, &naddr);
@@ -604,7 +619,13 @@ impl NakamotoTenureDownloaderSet { }; test_debug!("Got response from {}", &naddr); - let Ok(blocks_opt) = downloader.handle_next_download_response(response) else { + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { test_debug!("Failed to handle download response from {}", &naddr); neighbor_rpc.add_dead(network, &naddr); continue; }
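With the WSTS aggregate key gone, block authenticity is now established against the tenure's signer reward set; a hedged sketch of the check the downloaders perform (the wrapper function is illustrative; verify_signer_signatures is the method used above):

    fn check_block_signatures(block: &NakamotoBlock, signers: &RewardSet) -> Result<(), NetError> {
        block
            .header
            .verify_signer_signatures(signers)
            .map_err(|_e| NetError::InvalidMessage)
    }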
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4c48a5762fb..d51e99d5a10 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs
@@ -28,19 +28,20 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
@@ -61,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError};
@@ -77,8 +78,7 @@ pub enum NakamotoUnconfirmedDownloadState { /// The inner value is tenure-start block ID of the ongoing tenure. GetTenureStartBlock(StacksBlockId), /// Receiving unconfirmed tenure blocks. - /// The inner value is the _last_ block on the ongoing tenure. The ongoing tenure is fetched - /// from highest block to lowest block. + /// The inner value is the block ID of the next block to fetch. GetUnconfirmedTenureBlocks(StacksBlockId), /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
@@ -107,10 +107,10 @@ pub struct NakamotoUnconfirmedTenureDownloader { pub state: NakamotoUnconfirmedDownloadState, /// Address of who we're asking pub naddr: NeighborAddress, - /// Aggregate public key of the highest confirmed tenure - pub confirmed_aggregate_public_key: Option<Point>, - /// Aggregate public key of the unconfirmed (ongoing) tenure - pub unconfirmed_aggregate_public_key: Option<Point>, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option<RewardSet>, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option<RewardSet>, /// Block ID of this node's highest-processed block. /// We will not download any blocks lower than this, if it's set.
pub highest_processed_block_id: Option, @@ -133,8 +133,8 @@ impl NakamotoUnconfirmedTenureDownloader { Self { state: NakamotoUnconfirmedDownloadState::GetTenureInfo, naddr, - confirmed_aggregate_public_key: None, - unconfirmed_aggregate_public_key: None, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, highest_processed_block_id, highest_processed_block_height: None, tenure_tip: None, @@ -185,7 +185,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, remote_tenure_tip: RPCGetTenureInfo, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result<(), NetError> { if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { return Err(NetError::InvalidState); @@ -194,22 +194,45 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); } + test_debug!("Got tenure info {:?}", remote_tenure_tip); + test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + // authenticate consensus hashes against canonical chain history let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &remote_tenure_tip.consensus_hash, )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &remote_tenure_tip.parent_consensus_hash, )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; let ih = sortdb.index_handle(&local_sort_tip.sortition_id); let ancestor_local_tenure_sn = ih .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { // .consensus_hash is not on the canonical fork @@ -220,7 +243,15 @@ impl NakamotoUnconfirmedTenureDownloader { } let ancestor_parent_local_tenure_sn = ih .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or(NetError::DBError(DBError::NotFoundError.into()))?; + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { // .parent_consensus_hash is not on the canonical fork @@ -244,18 +275,21 @@ impl NakamotoUnconfirmedTenureDownloader { if local_tenure_sn.winning_stacks_block_hash.0 != remote_tenure_tip.parent_tenure_start_block_id.0 { - warn!("Ongoing tenure does not commit to highest complete tenure's start block"; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.tenure_start_block_id, + debug!("Ongoing tenure does not commit to highest complete tenure's start block. 
Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::InvalidMessage); + return Err(NetError::StaleView); } if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronize this tenure before, so don't get anymore blocks before it. + // we've synchronized this tenure before, so don't get anymore blocks before it. let highest_processed_block = chainstate .nakamoto_blocks_db() .get_nakamoto_block(highest_processed_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? .0; let highest_processed_block_height = highest_processed_block.header.chain_length; @@ -297,21 +331,24 @@ impl NakamotoUnconfirmedTenureDownloader { ) .expect("FATAL: sortition from before system start"); - // get aggregate public keys for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_aggregate_public_key)) = - agg_pubkeys.get(&parent_tenure_rc).cloned() + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for confirmed tenure {} (rc {})", &parent_local_tenure_sn.consensus_hash, parent_tenure_rc ); return Err(NetError::InvalidState); }; - let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for unconfirmed tenure {} (rc {})", &local_tenure_sn.consensus_hash, tenure_rc ); return Err(NetError::InvalidState); @@ -319,13 +356,19 @@ impl NakamotoUnconfirmedTenureDownloader { if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&remote_tenure_tip.tenure_start_block_id.clone())? + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? { // proceed to get unconfirmed blocks. We already have the tenure-start block. let unconfirmed_tenure_start_block = chainstate .nakamoto_blocks_db() .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? 
.0; self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( @@ -339,14 +382,12 @@ impl NakamotoUnconfirmedTenureDownloader { } test_debug!( - "Will validate unconfirmed blocks with ({},{}) and ({},{})", - &confirmed_aggregate_public_key, + "Will validate unconfirmed blocks with reward sets in ({},{})", parent_tenure_rc, - &unconfirmed_aggregate_public_key, tenure_rc ); - self.confirmed_aggregate_public_key = Some(confirmed_aggregate_public_key); - self.unconfirmed_aggregate_public_key = Some(unconfirmed_aggregate_public_key); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); self.tenure_tip = Some(remote_tenure_tip); Ok(()) @@ -367,23 +408,25 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); return Err(NetError::InvalidState); }; - // stacker signature has to match the current aggregate public key - if !unconfirmed_tenure_start_block + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block .header - .verify_signer(unconfirmed_aggregate_public_key) + .verify_signer_signatures(unconfirmed_signer_keys) { warn!("Invalid tenure-start block: bad signer signature"; "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); + "state" => %self.state, + "error" => %e); return Err(NetError::InvalidMessage); } @@ -431,15 +474,18 @@ impl NakamotoUnconfirmedTenureDownloader { }; let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); return Err(NetError::InvalidState); }; if tenure_blocks.is_empty() { // nothing to do + debug!("No tenure blocks obtained"); return Ok(None); } @@ -447,6 +493,7 @@ impl NakamotoUnconfirmedTenureDownloader { // If there's a tenure-start block, it must be last. 
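The loop that follows enforces this invariant by walking parent pointers: each block must carry the ID that the previous (higher) block named as its parent, so anything outside the tenure's ancestry is rejected. A minimal sketch of that walk, with a hypothetical `Block` type standing in for `NakamotoBlock`:

```rust
/// Hypothetical stand-in for NakamotoBlock; the real code compares
/// header.block_id() against the running `expected_block_id`.
struct Block {
    id: u64,
    parent_id: u64,
}

/// Accept `blocks` only if they run from highest to lowest, each one
/// being the parent named by its predecessor, starting from `tip_id`.
fn is_ancestry_chain(blocks: &[Block], tip_id: u64) -> bool {
    let mut expected = tip_id;
    for block in blocks {
        if block.id != expected {
            return false; // not part of this tenure's ancestry
        }
        expected = block.parent_id; // the next block must be this one's parent
    }
    true
}
```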
let mut expected_block_id = last_block_id; let mut finished_download = false; + let mut last_block_index = None; for (cnt, block) in tenure_blocks.iter().enumerate() { if &block.header.block_id() != expected_block_id { warn!("Unexpected Nakamoto block -- not part of tenure"; @@ -454,12 +501,15 @@ impl NakamotoUnconfirmedTenureDownloader { "block_id" => %block.header.block_id()); return Err(NetError::InvalidMessage); } - if !block.header.verify_signer(unconfirmed_aggregate_public_key) { + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { warn!("Invalid block: bad signer signature"; "tenure_id" => %tenure_tip.consensus_hash, "block.header.block_id" => %block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); + "state" => %self.state, + "error" => %e); return Err(NetError::InvalidMessage); } @@ -493,15 +543,20 @@ impl NakamotoUnconfirmedTenureDownloader { } finished_download = true; + last_block_index = Some(cnt); break; } + test_debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + // NOTE: this field can get updated by the downloader while this state-machine is in // this state. if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { if expected_block_id == highest_processed_block_id { // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); finished_download = true; + last_block_index = Some(cnt); break; } } @@ -511,15 +566,22 @@ impl NakamotoUnconfirmedTenureDownloader { if let Some(highest_processed_block_height) = self.highest_processed_block_height.as_ref() { - if &block.header.chain_length < highest_processed_block_height { + if &block.header.chain_length <= highest_processed_block_height { // no need to continue this download debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); finished_download = true; + last_block_index = Some(cnt); break; } } expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); } if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { @@ -534,6 +596,8 @@ impl NakamotoUnconfirmedTenureDownloader { self.state = NakamotoUnconfirmedDownloadState::Done; let highest_processed_block_height = *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + test_debug!("Finished receiving unconfirmed tenure"); return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { blocks .into_iter() @@ -557,6 +621,10 @@ impl NakamotoUnconfirmedTenureDownloader { }; let next_block_id = earliest_block.header.parent_block_id.clone(); + test_debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); Ok(None) } @@ -589,6 +657,56 @@ impl NakamotoUnconfirmedTenureDownloader { )?) } + /// Determine if we can produce a highest-complete tenure request. 
+ /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get /// its tenure-start block. @@ -597,38 +715,33 @@ impl NakamotoUnconfirmedTenureDownloader { /// Returns Err(..) if we call this function out of sequence. pub fn make_highest_complete_tenure_downloader( &self, - highest_tenure: &WantedTenure, - unconfirmed_tenure: &WantedTenure, ) -> Result { if self.state != NakamotoUnconfirmedDownloadState::Done { return Err(NetError::InvalidState); } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { + let Some(tenure_tip) = &self.tenure_tip else { return Err(NetError::InvalidState); }; - let Some(confirmed_aggregate_public_key) = self.confirmed_aggregate_public_key.as_ref() - else { + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; test_debug!( - "Create highest complete tenure downloader for {}", - &highest_tenure.tenure_id_consensus_hash + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, + &self.naddr, ); let ntd = NakamotoTenureDownloader::new( - highest_tenure.tenure_id_consensus_hash.clone(), - unconfirmed_tenure.winning_block_id.clone(), - unconfirmed_tenure_start_block.header.block_id(), + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), self.naddr.clone(), - confirmed_aggregate_public_key.clone(), - unconfirmed_aggregate_public_key.clone(), - ) - .with_tenure_end_block(unconfirmed_tenure_start_block.clone()); + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); Ok(ntd) } @@ -714,7 +827,7 @@ impl NakamotoUnconfirmedTenureDownloader { sortdb: &SortitionDB, local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result>, NetError> 
{ match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { @@ -726,7 +839,7 @@ local_sort_tip, chainstate, remote_tenure_info, - agg_pubkeys, + current_reward_sets, )?; Ok(None) } @@ -739,7 +852,9 @@ NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { test_debug!("Got unconfirmed tenure blocks response"); let blocks = response.decode_nakamoto_tenure()?; - self.try_accept_unconfirmed_tenure_blocks(blocks) + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + test_debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) } NakamotoUnconfirmedDownloadState::Done => { return Err(NetError::InvalidState); diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 78a2036ae32..dec51df42ae 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -797,18 +797,25 @@ impl StacksHttpRecvStream { let mut decoded_buf = vec![0u8; CHUNK_BUF_LEN]; let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { Ok((0, num_consumed)) => { - trace!( + test_debug!( "consume_data blocked on 0 decoded bytes ({} consumed)", num_consumed ); blocked = true; (0, num_consumed) } - Ok((num_read, num_consumed)) => (num_read, num_consumed), + Ok((num_read, num_consumed)) => { + test_debug!( + "consume_data read {} bytes ({} consumed)", + num_read, + num_consumed + ); + (num_read, num_consumed) + } Err(e) => { if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut { - trace!("consume_data blocked on read error"); + test_debug!("consume_data blocked on read error"); blocked = true; (0, 0) } else { @@ -1466,7 +1473,7 @@ impl ProtocolFamily for StacksHttp { } None => { // need more data - trace!( + test_debug!( "did not read http response payload, but buffered {}", num_read ); diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 8df013a8c07..62a5d024706 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -52,10 +52,7 @@ pub const INV_SYNC_INTERVAL: u64 = 150; #[cfg(test)] pub const INV_SYNC_INTERVAL: u64 = 3; -#[cfg(not(test))] pub const INV_REWARD_CYCLES: u64 = 2; -#[cfg(test)] -pub const INV_REWARD_CYCLES: u64 = 1; #[derive(Debug, PartialEq, Clone)] pub struct PeerBlocksInv { @@ -1083,7 +1080,7 @@ impl InvState { pub fn cull_bad_peers(&mut self) -> HashSet<NeighborKey> { let mut bad_peers = HashSet::new(); for (nk, stats) in self.block_stats.iter() { - if stats.status == NodeStatus::Broken || stats.status == NodeStatus::Dead { + if stats.status == NodeStatus::Broken { debug!( "Peer {:?} has node status {:?}; culling...", nk, &stats.status ); @@ -1756,7 +1753,7 @@ impl PeerNetwork { } /// Determine at which reward cycle to begin scanning inventories - fn get_block_scan_start(&self, sortdb: &SortitionDB, highest_remote_reward_cycle: u64) -> u64 { + pub(crate) fn get_block_scan_start(&self, sortdb: &SortitionDB) -> u64 { // see if the stacks tip affirmation map and heaviest affirmation map diverge. If so, then // start scanning at the reward cycle just before that.
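With `highest_remote_reward_cycle` dropped from the signature, the scan start now depends only on local state: back off `inv_reward_cycles` cycles from the local Stacks tip's reward cycle, then start no later than the reward cycle where the affirmation maps diverge. A standalone sketch of the arithmetic (a free function with illustrative parameter names, not the real method):

```rust
use std::cmp;

/// Sketch of the new scan-start rule: begin `inv_reward_cycles` cycles
/// behind the local Stacks tip, but no later than the reward cycle where
/// the stacks-tip and heaviest affirmation maps diverge.
fn block_scan_start(stacks_tip_rc: u64, am_rescan_rc: u64, inv_reward_cycles: u64) -> u64 {
    let start_reward_cycle = stacks_tip_rc.saturating_sub(inv_reward_cycles);
    cmp::min(am_rescan_rc, start_reward_cycle)
}

// Example: tip at reward cycle 100 with a 2-cycle lookback and a
// divergence at cycle 95 begins scanning at cycle 95:
// block_scan_start(100, 95, 2) == 95
```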
let am_rescan_rc = self @@ -1783,19 +1780,18 @@ .block_height_to_reward_cycle(stacks_tip_burn_block_height) .unwrap_or(0); - let start_reward_cycle = cmp::min( - stacks_tip_rc, - highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles), - ); + let start_reward_cycle = + stacks_tip_rc.saturating_sub(self.connection_opts.inv_reward_cycles); + let rescan_rc = cmp::min(am_rescan_rc, start_reward_cycle); test_debug!( - "begin blocks inv scan at {} = min({},{},{})", + "begin blocks inv scan at {} = min({},{}) stacks_tip_am={} heaviest_am={}", rescan_rc, - stacks_tip_rc, - highest_remote_reward_cycle.saturating_sub(self.connection_opts.inv_reward_cycles), - am_rescan_rc + am_rescan_rc, + start_reward_cycle, + &self.stacks_tip_affirmation_map, + &self.heaviest_affirmation_map ); rescan_rc } @@ -1814,12 +1810,7 @@ Some(x) => x, None => { // proceed to block scan - let scan_start_rc = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start_rc = self.get_block_scan_start(sortdb); debug!("{:?}: cannot make any more GetPoxInv requests for {:?}; proceeding to block inventory scan at reward cycle {}", &self.local_peer, nk, scan_start_rc); stats.reset_block_scan(scan_start_rc); @@ -1876,12 +1867,7 @@ // proceed with block scan. // If we're in IBD, then this is an always-allowed peer and we should // react to divergences by deepening our rescan. - let scan_start_rc = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start_rc = self.get_block_scan_start(sortdb); debug!( "{:?}: proceeding to block inventory scan for {:?} (diverged) at reward cycle {} (ibd={})", &self.local_peer, nk, scan_start_rc, ibd @@ -1982,12 +1968,7 @@ } // proceed to block scan. - let scan_start = self.get_block_scan_start( - sortdb, - self.burnchain - .block_height_to_reward_cycle(stats.inv.get_block_height()) - .unwrap_or(0), - ); + let scan_start = self.get_block_scan_start(sortdb); debug!( "{:?}: proceeding to block inventory scan for {:?} at reward cycle {}", &self.local_peer, nk, scan_start @@ -2368,7 +2349,6 @@ .unwrap_or(network.burnchain.reward_cycle_to_block_height( network.get_block_scan_start( sortdb, - network.pox_id.num_inventory_reward_cycles() as u64, ), )) .saturating_sub(sortdb.first_block_height); @@ -2455,6 +2435,10 @@ good_sync_peers_set.insert(random_sync_peers_list[i].clone()); } } else { + // make *sure* this list isn't empty + for bootstrap_peer in bootstrap_peers.iter() { + good_sync_peers_set.insert(bootstrap_peer.clone()); + } debug!( "{:?}: in initial block download; only inv-sync with {} always-allowed peers", &network.local_peer, @@ -2661,8 +2645,32 @@ (done, throttled) } + /// Check to see if an epoch2x peer has fully sync'ed. + /// (has crate visibility for testing) + pub(crate) fn check_peer_epoch2x_synced( + &self, + ibd: bool, + num_reward_cycles_synced: u64, + ) -> bool { + // either not in IBD, and we've sync'ed the highest reward cycle in the PoX vector, + // OR, + // in IBD, and we've sync'ed up to the highest sortition's reward cycle.
+ // + // The difference is that in the former case, the PoX inventory vector will be as long as + // the sortition history, but the number of reward cycles tracked by the inv state machine + // may be less when the node is booting up. So, we preface that check by also checking + // that we're in steady-state mode (i.e. not IBD). + (!ibd && num_reward_cycles_synced >= self.pox_id.num_inventory_reward_cycles() as u64) + || (ibd + && num_reward_cycles_synced + >= self + .burnchain + .block_height_to_reward_cycle(self.burnchain_tip.block_height) + .expect("FATAL: sortition has no reward cycle")) + } + /// Check to see if an always-allowed peer has performed an epoch 2.x inventory sync - fn check_always_allowed_peer_inv_sync_epoch2x(&self) -> bool { + fn check_always_allowed_peer_inv_sync_epoch2x(&self, ibd: bool) -> bool { // only count an inv_sync as passing if there's an always-allowed node // in our inv state let always_allowed: HashSet<_> = @@ -2702,7 +2710,7 @@ impl PeerNetwork { continue; } - if stats.inv.num_reward_cycles >= self.pox_id.num_inventory_reward_cycles() as u64 { + if self.check_peer_epoch2x_synced(ibd, stats.inv.num_reward_cycles) { // we have fully sync'ed with an always-allowed peer debug!( "{:?}: Fully-sync'ed PoX inventory from {}", @@ -2763,7 +2771,7 @@ impl PeerNetwork { return work_state; } - let finished_always_allowed_inv_sync = self.check_always_allowed_peer_inv_sync_epoch2x(); + let finished_always_allowed_inv_sync = self.check_always_allowed_peer_inv_sync_epoch2x(ibd); if finished_always_allowed_inv_sync { debug!( "{:?}: synchronized inventories with at least one always-allowed peer", diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index de46d15744a..491d0bcaca6 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -17,11 +17,12 @@ use std::collections::{BTreeMap, HashMap}; use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use crate::burnchains::PoxConstants; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; @@ -72,21 +73,29 @@ pub(crate) struct InvTenureInfo { impl InvTenureInfo { /// Load up cacheable tenure state for a given tenure-ID consensus hash. - /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash. + /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash + /// (i.e. it was a BlockFound tenure, not an Extension tenure) pub fn load( chainstate: &StacksChainState, - consensus_hash: &ConsensusHash, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { - Ok( - NakamotoChainState::get_highest_nakamoto_tenure_change_by_tenure_id( - chainstate.db(), - consensus_hash, - )? - .map(|tenure| Self { + Ok(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + tip_block_id, + tenure_id_consensus_hash, + )? 
+ .map(|tenure| { + test_debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); + Self { tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, - }), - ) + } + }) + .or_else(|| { + test_debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); + None + })) } } @@ -113,13 +122,15 @@ impl InvGenerator { fn get_processed_tenure( &mut self, chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result<Option<InvTenureInfo>, NetError> { if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) { return Ok((*info_opt).clone()); }; // not cached so go load it - let loaded_info_opt = InvTenureInfo::load(chainstate, &tenure_id_consensus_hash)?; + let loaded_info_opt = + InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; self.processed_tenures .insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); Ok(loaded_info_opt) @@ -144,9 +155,12 @@ impl InvGenerator { tip: &BlockSnapshot, sortdb: &SortitionDB, chainstate: &StacksChainState, + nakamoto_tip: &StacksBlockId, reward_cycle: u64, ) -> Result<Vec<bool>, NetError> { let ih = sortdb.index_handle(&tip.sortition_id); + + // N.B. reward_cycle_to_block_height starts at reward index 1 let reward_cycle_end_height = sortdb .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, reward_cycle + 1) @@ -162,7 +176,8 @@ let mut cur_height = reward_cycle_end_tip.block_height; let mut cur_consensus_hash = reward_cycle_end_tip.consensus_hash; - let mut cur_tenure_opt = self.get_processed_tenure(chainstate, &cur_consensus_hash)?; + let mut cur_tenure_opt = + self.get_processed_tenure(chainstate, &nakamoto_tip, &cur_consensus_hash)?; // loop variables and invariants: // @@ -218,6 +233,7 @@ tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, + &nakamoto_tip, &cur_tenure_info.parent_tenure_id_consensus_hash, )?; } else { @@ -228,8 +244,11 @@ // no active tenure during this sortition. Check the parent sortition to see if a // tenure began there. tenure_status.push(false); - cur_tenure_opt = - self.get_processed_tenure(chainstate, &parent_sortition_consensus_hash)?; + cur_tenure_opt = self.get_processed_tenure( + chainstate, + &nakamoto_tip, + &parent_sortition_consensus_hash, + )?; } // next sortition @@ -247,8 +266,6 @@ #[derive(Debug, PartialEq, Clone)] pub struct NakamotoTenureInv { - /// What state is the machine in? - pub state: NakamotoInvState, /// Bitmap of which tenures a peer has. /// Maps reward cycle to bitmap. pub tenures_inv: BTreeMap<u64, BitVec<2100>>, @@ -279,7 +296,6 @@ neighbor_address: NeighborAddress, ) -> Self { Self { - state: NakamotoInvState::GetNakamotoInvBegin, tenures_inv: BTreeMap::new(), last_updated_at: 0, first_block_height, @@ -335,7 +351,8 @@ /// Add in a newly-discovered inventory. /// NOTE: inventories are supposed to be aligned to the reward cycle - /// Returns true if we learned about at least one new tenure-start block + /// Returns true if the tenure bitvec has changed -- we either learned about a new tenure-start + /// block, or the remote peer "un-learned" it (e.g. due to a reorg). /// Returns false if not.
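Since a reorg can now clear bits as well as set them, change detection has to compare the stored inventory against the incoming one rather than just counting newly set bits. A hedged sketch of that comparison, using `Vec<bool>` in place of the real `BitVec<2100>` (the branch for a first-seen cycle is an assumption about the intended semantics):

```rust
use std::collections::BTreeMap;

/// Illustrative merge: report a change whether bits were set *or* cleared.
fn merge_inv(
    tenures_inv: &mut BTreeMap<u64, Vec<bool>>,
    reward_cycle: u64,
    new_inv: Vec<bool>,
) -> bool {
    match tenures_inv.insert(reward_cycle, new_inv.clone()) {
        // a reorg can clear bits, so any difference counts as a change
        Some(old_inv) => old_inv != new_inv,
        // first inventory for this cycle: treat any set bit as new information
        None => new_inv.iter().any(|bit| *bit),
    }
}
```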
pub fn merge_tenure_inv(&mut self, tenure_inv: BitVec<2100>, reward_cycle: u64) -> bool { // populate the tenures bitmap so we can fit this tenures inv @@ -367,7 +384,6 @@ && (self.cur_reward_cycle >= cur_rc || !self.online) { test_debug!("Reset inv comms for {}", &self.neighbor_address); - self.state = NakamotoInvState::GetNakamotoInvBegin; self.online = true; self.start_sync_time = now; self.cur_reward_cycle = start_rc; @@ -474,13 +490,6 @@ } } -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum NakamotoInvState { - GetNakamotoInvBegin, - GetNakamotoInvFinish, - Done, -} - /// Nakamoto inventory state machine pub struct NakamotoInvStateMachine<NC: NeighborComms> { /// Communications links diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs new file mode 100644 index 00000000000..2a4232ad2fd --- /dev/null +++ b/stackslib/src/net/mempool/mod.rs @@ -0,0 +1,620 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::net::SocketAddr; + +use rand::prelude::*; +use rand::thread_rng; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; +use url; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::StacksTransaction; +use crate::core::MemPoolDB; +use crate::net::chat::ConversationP2P; +use crate::net::dns::{DNSClient, DNSRequest}; +use crate::net::httpcore::StacksHttpRequest; +use crate::net::inv::inv2x::*; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, HttpRequestContents}; +use crate::util_lib::strings::UrlString; + +/// The four states the mempool sync state machine can be in +#[derive(Debug, Clone, PartialEq)] +pub enum MempoolSyncState { + /// Picking an outbound peer + PickOutboundPeer, + /// Resolving its data URL to a SocketAddr. Contains the data URL, DNS request handle, and + /// mempool page ID + ResolveURL(UrlString, DNSRequest, Txid), + /// Sending the request for mempool transactions. Contains the data URL, resolved socket, and + /// mempool page. + SendQuery(UrlString, SocketAddr, Txid), + /// Receiving the mempool response. Contains the URL, socket address, and event ID + RecvResponse(UrlString, SocketAddr, usize), +} + +/// Mempool synchronization state machine +#[derive(Debug, Clone, PartialEq)] +pub struct MempoolSync { + /// what state are we in? + mempool_state: MempoolSyncState, + /// when's the next mempool sync start? + mempool_sync_deadline: u64, + /// how long can the sync go for? + mempool_sync_timeout: u64, + /// how many complete syncs have happened + mempool_sync_completions: u64, + /// how many txs have been sync'ed? + pub(crate) mempool_sync_txs: u64, + /// what's the API endpoint?
+ api_endpoint: String, +} + +impl MempoolSync { + pub fn new() -> Self { + Self { + mempool_state: MempoolSyncState::PickOutboundPeer, + mempool_sync_deadline: 0, + mempool_sync_timeout: 0, + mempool_sync_completions: 0, + mempool_sync_txs: 0, + api_endpoint: "/v2/mempool/query".to_string(), + } + } + + /// Do a mempool sync. Return any transactions we might receive. + #[cfg_attr(test, mutants::skip)] + pub fn run( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ibd: bool, + ) -> Option> { + if ibd { + return None; + } + + return match self.do_mempool_sync(network, dns_client_opt, mempool) { + (true, txs_opt) => { + // did we run to completion? + if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, and done receiving", + &network.get_local_peer(), + txs.len() + ); + + self.mempool_sync_deadline = + get_epoch_time_secs() + network.get_connection_opts().mempool_sync_interval; + self.mempool_sync_completions = self.mempool_sync_completions.saturating_add(1); + self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); + Some(txs) + } else { + None + } + } + (false, txs_opt) => { + // did we get some transactions, but have more to get? + if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, but have more", + &network.get_local_peer(), + txs.len() + ); + + self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); + Some(txs) + } else { + None + } + } + }; + } + + /// Reset a mempool sync + fn mempool_sync_reset(&mut self) { + self.mempool_state = MempoolSyncState::PickOutboundPeer; + self.mempool_sync_timeout = 0; + } + + /// Pick a peer to mempool sync with. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_pick_outbound_peer( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, NetError> { + let num_peers = network.get_num_p2p_convos(); + if num_peers == 0 { + debug!("No peers connected; cannot do mempool sync"); + return Ok(None); + } + + let mut idx = thread_rng().gen::() % num_peers; + let mut mempool_sync_data_url = None; + let mut mempool_sync_data_url_and_sockaddr = None; + for _ in 0..num_peers { + let Some((_event_id, convo)) = network.iter_peer_convos().skip(idx).next() else { + idx = 0; + continue; + }; + idx = (idx + 1) % num_peers; + + // only talk to authenticated, outbound peers + if !convo.is_authenticated() || !convo.is_outbound() { + continue; + } + // peer must support mempool protocol + if !ConversationP2P::supports_mempool_query(convo.peer_services) { + continue; + } + // has a data URL? + if convo.data_url.len() == 0 { + continue; + } + // already resolved? + if let Some(sockaddr) = convo.data_ip.as_ref() { + mempool_sync_data_url_and_sockaddr = + Some((convo.data_url.clone(), sockaddr.clone())); + break; + } + // can we resolve the data URL? 
+ let url = convo.data_url.clone(); + if dns_client_opt.is_none() { + if let Ok(Some(_)) = PeerNetwork::try_get_url_ip(&url) { + } else { + // need a DNS client for this one + continue; + } + } + + // will resolve + mempool_sync_data_url = Some(url); + break; + } + + if let Some((url_str, sockaddr)) = mempool_sync_data_url_and_sockaddr { + // already resolved + return Ok(Some(MempoolSyncState::SendQuery( + url_str, + sockaddr, + page_id.clone(), + ))); + } else if let Some(url) = mempool_sync_data_url { + // will need to resolve + self.mempool_sync_begin_resolve_data_url(network, url, dns_client_opt, page_id) + } else { + debug!("No peer has a data URL, so no mempool sync can happen"); + Ok(None) + } + } + + /// Begin resolving the DNS host of a data URL for mempool sync. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_begin_resolve_data_url( + &self, + network: &PeerNetwork, + url_str: UrlString, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, NetError> { + // start resolving + let url = url_str.parse_to_block_url()?; + let port = match url.port_or_known_default() { + Some(p) => p, + None => { + warn!("Unsupported URL {:?}: unknown port", &url); + return Ok(None); + } + }; + + // bare IP address? + if let Some(addr) = PeerNetwork::try_get_url_ip(&url_str)? { + return Ok(Some(MempoolSyncState::SendQuery( + url_str, + addr, + page_id.clone(), + ))); + } else if let Some(url::Host::Domain(domain)) = url.host() { + if let Some(ref mut dns_client) = dns_client_opt { + // begin DNS query + match dns_client.queue_lookup( + domain, + port, + get_epoch_time_ms() + network.get_connection_opts().dns_timeout, + ) { + Ok(_) => {} + Err(_) => { + warn!("Failed to queue DNS lookup on {}", &url_str); + return Ok(None); + } + } + return Ok(Some(MempoolSyncState::ResolveURL( + url_str, + DNSRequest::new(domain.to_string(), port, 0), + page_id.clone(), + ))); + } else { + // can't proceed -- no DNS client + return Ok(None); + } + } else { + // can't proceed + return Ok(None); + } + } + + /// Resolve our picked mempool sync peer's data URL. + /// Returns Ok(true, ..) if we're done syncing the mempool. + /// Returns Ok(false, ..) if there's more to do + /// Returns the socket addr if we ever succeed in resolving it. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_resolve_data_url( + url_str: &UrlString, + request: &DNSRequest, + dns_client_opt: &mut Option<&mut DNSClient>, + ) -> Result<(bool, Option), NetError> { + if let Ok(Some(addr)) = PeerNetwork::try_get_url_ip(url_str) { + // URL contains an IP address -- go with that + Ok((false, Some(addr))) + } else if let Some(dns_client) = dns_client_opt { + // keep trying to resolve + match dns_client.poll_lookup(&request.host, request.port) { + Ok(Some(dns_response)) => match dns_response.result { + Ok(mut addrs) => { + if let Some(addr) = addrs.pop() { + // resolved! 
+ return Ok((false, Some(addr))); + } else { + warn!("DNS returned no results for {}", url_str); + return Ok((true, None)); + } + } + Err(msg) => { + warn!("DNS failed to look up {:?}: {}", &url_str, msg); + return Ok((true, None)); + } + }, + Ok(None) => { + // still in-flight + return Ok((false, None)); + } + Err(e) => { + warn!("DNS lookup failed on {:?}: {:?}", url_str, &e); + return Ok((true, None)); + } + } + } else { + // can't do anything + debug!("No DNS client, and URL contains a domain, so no mempool sync can happen"); + return Ok((true, None)); + } + } + + /// Ask the remote peer for its mempool, connecting to it in the process if need be. + /// Returns Ok((true, ..)) if we're done mempool syncing + /// Returns Ok((false, ..)) if there's more to do + /// Returns the event ID on success + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_send_query( + &mut self, + network: &mut PeerNetwork, + url: &UrlString, + addr: &SocketAddr, + mempool: &MemPoolDB, + page_id: Txid, + ) -> Result<(bool, Option), NetError> { + let sync_data = mempool.make_mempool_sync_data()?; + let request = StacksHttpRequest::new_for_peer( + PeerHost::from_socketaddr(addr), + "POST".into(), + self.api_endpoint.clone(), + HttpRequestContents::new() + .query_arg("page_id".into(), format!("{}", &page_id)) + .payload_stacks(&sync_data), + )?; + + let event_id = network.connect_or_send_http_request(url.clone(), addr.clone(), request)?; + return Ok((false, Some(event_id))); + } + + /// Receive the mempool sync response. + /// Return Ok(true, ..) if we're done with the mempool sync. + /// Return Ok(false, ..) if we have more work to do. + /// Returns the page ID of the next request to make, and the list of transactions we got + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_recv_response( + &mut self, + network: &mut PeerNetwork, + event_id: usize, + ) -> Result<(bool, Option, Option>), NetError> { + PeerNetwork::with_http(network, |network, http| { + match http.get_conversation(event_id) { + None => { + if http.is_connecting(event_id) { + debug!( + "{:?}: Mempool sync event {} is not connected yet", + &network.local_peer, event_id + ); + return Ok((false, None, None)); + } else { + // conversation died + debug!("{:?}: Mempool sync peer hung up", &network.local_peer); + return Ok((true, None, None)); + } + } + Some(ref mut convo) => { + match convo.try_get_response() { + None => { + // still waiting + debug!( + "{:?}: Mempool sync event {} still waiting for a response", + &network.get_local_peer(), + event_id + ); + return Ok((false, None, None)); + } + Some(http_response) => match http_response.decode_mempool_txs_page() { + Ok((txs, page_id_opt)) => { + debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); + return Ok((true, page_id_opt, Some(txs))); + } + Err(e) => { + warn!( + "{:?}: Mempool sync request did not receive a txs page: {:?}", + &network.local_peer, &e + ); + return Ok((true, None, None)); + } + }, + } + } + } + }) + } + + /// Do a mempool sync + /// Return true if we're done and can advance to the next state. + /// Returns the transactions as well if the sync ran to completion. 
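Taken together, the states above form a loop that `do_mempool_sync()` drives until it blocks on I/O: pick a peer, resolve its data URL, post the query, read a page of transactions, and repeat while the response names a next page. A simplified sketch of the transition function (an illustrative enum, not the real `MempoolSyncState` variants with their payloads):

```rust
/// Simplified mirror of MempoolSyncState without payloads.
enum SyncStep {
    PickOutboundPeer,
    ResolveUrl,
    SendQuery,
    RecvResponse,
}

/// Next step after the current one completes; None means the sync is done
/// and the machine resets to PickOutboundPeer for the next interval.
fn next_step(step: SyncStep, next_page: bool) -> Option<SyncStep> {
    match step {
        // a bare-IP data URL skips straight to SendQuery in the real code
        SyncStep::PickOutboundPeer => Some(SyncStep::ResolveUrl),
        SyncStep::ResolveUrl => Some(SyncStep::SendQuery),
        SyncStep::SendQuery => Some(SyncStep::RecvResponse),
        SyncStep::RecvResponse if next_page => Some(SyncStep::SendQuery),
        SyncStep::RecvResponse => None,
    }
}
```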
+ #[cfg_attr(test, mutants::skip)] + fn do_mempool_sync( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ) -> (bool, Option>) { + if get_epoch_time_secs() <= self.mempool_sync_deadline { + debug!( + "{:?}: Wait until {} to do a mempool sync", + &network.get_local_peer(), + self.mempool_sync_deadline + ); + return (true, None); + } + + if self.mempool_sync_timeout == 0 { + // begin new sync + self.mempool_sync_timeout = + get_epoch_time_secs() + network.get_connection_opts().mempool_sync_timeout; + } else { + if get_epoch_time_secs() > self.mempool_sync_timeout { + debug!( + "{:?}: Mempool sync took too long; terminating", + &network.get_local_peer() + ); + self.mempool_sync_reset(); + return (true, None); + } + } + + // try advancing states until we get blocked. + // Once we get blocked, return. + loop { + let cur_state = self.mempool_state.clone(); + debug!( + "{:?}: Mempool sync state is {:?}", + &network.get_local_peer(), + &cur_state + ); + match cur_state { + MempoolSyncState::PickOutboundPeer => { + // 1. pick a random outbound conversation. + match self.mempool_sync_pick_outbound_peer( + network, + dns_client_opt, + &Txid([0u8; 32]), + ) { + Ok(Some(next_state)) => { + // success! can advance to either resolve a URL or to send a query + self.mempool_state = next_state; + } + Ok(None) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // done; need reset + warn!("mempool_sync_pick_outbound_peer returned {:?}", &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::ResolveURL(ref url_str, ref dns_request, ref page_id) => { + // 2. resolve its data URL + match Self::mempool_sync_resolve_data_url(url_str, dns_request, dns_client_opt) + { + Ok((false, Some(addr))) => { + // success! advance + self.mempool_state = + MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); + } + Ok((false, None)) => { + // try again later + return (false, None); + } + Ok((true, _)) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // failed + warn!( + "mempool_sync_resolve_data_url({}) failed: {:?}", + url_str, &e + ); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { + // 3. ask for the remote peer's mempool's novel txs + // address must be resolvable + if !network.get_connection_opts().private_neighbors + && PeerAddress::from_socketaddr(&addr).is_in_private_range() + { + debug!( + "{:?}: Mempool sync skips {}, which has private IP", + network.get_local_peer(), + &addr + ); + self.mempool_sync_reset(); + return (true, None); + } + debug!( + "{:?}: Mempool sync will query {} for mempool transactions at {}", + &network.get_local_peer(), + url, + page_id + ); + match self.mempool_sync_send_query(network, url, addr, mempool, page_id.clone()) + { + Ok((false, Some(event_id))) => { + // success! 
advance + debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &network.get_local_peer(), url, page_id, event_id); + self.mempool_state = + MempoolSyncState::RecvResponse(url.clone(), addr.clone(), event_id); + } + Ok((false, None)) => { + // try again later + return (false, None); + } + Ok((true, _)) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // done + warn!("mempool_sync_send_query({}) returned {:?}", url, &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::RecvResponse(ref url, ref addr, ref event_id) => { + match self.mempool_sync_recv_response(network, *event_id) { + Ok((true, next_page_id_opt, Some(txs))) => { + debug!( + "{:?}: Mempool sync received {} transactions; next page is {:?}", + &network.get_local_peer(), + txs.len(), + &next_page_id_opt + ); + + // done! got data + let ret = match next_page_id_opt { + Some(next_page_id) => { + // get the next page + self.mempool_state = MempoolSyncState::SendQuery( + url.clone(), + addr.clone(), + next_page_id, + ); + false + } + None => { + // done + self.mempool_sync_reset(); + true + } + }; + return (ret, Some(txs)); + } + Ok((true, _, None)) => { + // done! did not get data + self.mempool_sync_reset(); + return (true, None); + } + Ok((false, _, None)) => { + // still receiving; try again later + return (false, None); + } + Ok((false, _, Some(_))) => { + // should never happen + if cfg!(test) { + panic!("Reached invalid state in {:?}, aborting...", &cur_state); + } + warn!("Reached invalid state in {:?}, resetting...", &cur_state); + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // likely a network error + warn!("mempool_sync_recv_response returned {:?}", &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + } + } + } +} + +impl PeerNetwork { + /// Run the internal mempool sync machine + pub fn run_mempool_sync( + &mut self, + dns_client: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ibd: bool, + ) -> Option> { + let Some(mut mempool_sync) = self.mempool_sync.take() else { + return None; + }; + + let res = mempool_sync.run(self, dns_client, mempool, ibd); + + self.mempool_sync = Some(mempool_sync); + res + } +} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index ddf0e6a7130..e836bdfec21 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -36,8 +36,7 @@ use libstackerdb::{ }; use rand::{thread_rng, RngCore}; use regex::Regex; -use rusqlite::types::ToSqlOutput; -use rusqlite::ToSql; +use rusqlite::types::{ToSql, ToSqlOutput}; use serde::de::Error as de_Error; use serde::ser::Error as ser_Error; use serde::{Deserialize, Serialize}; @@ -125,6 +124,7 @@ pub mod http; /// Links http crate to Stacks pub mod httpcore; pub mod inv; +pub mod mempool; pub mod neighbors; pub mod p2p; /// Implements wrapper around `mio` crate, which itself is a wrapper around Linux's `epoll(2)` syscall. 
@@ -137,6 +137,7 @@ pub mod relay; pub mod rpc; pub mod server; pub mod stackerdb; +pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; @@ -284,6 +285,8 @@ pub enum Error { InvalidState, /// Waiting for DNS resolution WaitingForDNS, + /// No reward set for given reward cycle + NoPoXRewardSet(u64), } impl From for Error { @@ -432,6 +435,7 @@ impl fmt::Display for Error { Error::Http(e) => fmt::Display::fmt(&e, f), Error::InvalidState => write!(f, "Invalid state-machine state reached"), Error::WaitingForDNS => write!(f, "Waiting for DNS resolution"), + Error::NoPoXRewardSet(rc) => write!(f, "No PoX reward set for cycle {}", rc), } } } @@ -505,6 +509,7 @@ impl error::Error for Error { Error::Http(ref e) => Some(e), Error::InvalidState => None, Error::WaitingForDNS => None, + Error::NoPoXRewardSet(..) => None, } } } @@ -647,6 +652,8 @@ pub struct StacksNodeState<'a> { inner_mempool: Option<&'a mut MemPoolDB>, inner_rpc_args: Option<&'a RPCHandlerArgs<'a>>, relay_message: Option, + /// Are we in Initial Block Download (IBD) phase? + ibd: bool, } impl<'a> StacksNodeState<'a> { @@ -656,6 +663,7 @@ impl<'a> StacksNodeState<'a> { inner_chainstate: &'a mut StacksChainState, inner_mempool: &'a mut MemPoolDB, inner_rpc_args: &'a RPCHandlerArgs<'a>, + ibd: bool, ) -> StacksNodeState<'a> { StacksNodeState { inner_network: Some(inner_network), @@ -664,6 +672,7 @@ impl<'a> StacksNodeState<'a> { inner_mempool: Some(inner_mempool), inner_rpc_args: Some(inner_rpc_args), relay_message: None, + ibd, } } @@ -906,15 +915,24 @@ pub struct PoxInvData { pub pox_bitvec: Vec, // a bit will be '1' if the node knows for sure the status of its reward cycle's anchor block; 0 if not. } +/// Stacks epoch 2.x pushed block #[derive(Debug, Clone, PartialEq)] pub struct BlocksDatum(pub ConsensusHash, pub StacksBlock); -/// Blocks pushed +/// Stacks epoch 2.x blocks pushed #[derive(Debug, Clone, PartialEq)] pub struct BlocksData { pub blocks: Vec, } +/// Nakamoto epoch 3.x blocks pushed. +/// No need for a separate NakamotoBlocksDatum struct, because the consensus hashes that place this +/// block into the block stream are already embedded within the header +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoBlocksData { + pub blocks: Vec, +} + /// Microblocks pushed #[derive(Debug, Clone, PartialEq)] pub struct MicroblocksData { @@ -1138,6 +1156,7 @@ pub enum StacksMessageType { // Nakamoto-specific GetNakamotoInv(GetNakamotoInvData), NakamotoInv(NakamotoInvData), + NakamotoBlocks(NakamotoBlocksData), } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -1172,6 +1191,7 @@ pub enum StacksMessageID { // nakamoto GetNakamotoInv = 26, NakamotoInv = 27, + NakamotoBlocks = 28, // reserved Reserved = 255, } @@ -1263,11 +1283,16 @@ pub const GETPOXINV_MAX_BITLEN: u64 = 4096; #[cfg(test)] pub const GETPOXINV_MAX_BITLEN: u64 = 8; -// maximum number of blocks that can be pushed at once (even if the entire message is undersized). +// maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). // This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the // message. pub const BLOCKS_PUSHED_MAX: u32 = 32; +// maximum number of Nakamoto blocks that can be pushed at once (even if the entire message is undersized). 
+// This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the +// message. +pub const NAKAMOTO_BLOCKS_PUSHED_MAX: u32 = 32; + /// neighbor identifier #[derive(Clone, Eq, PartialOrd, Ord)] pub struct NeighborKey { @@ -1423,7 +1448,10 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work +#[derive(Clone)] pub struct NetworkResult { + /// Stacks chain tip when we began this pass + pub stacks_tip: StacksBlockId, /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks) pub download_pox_id: Option, /// Network messages we received but did not handle @@ -1440,10 +1468,14 @@ pub struct NetworkResult { pub pushed_blocks: HashMap>, /// all Stacks 2.x microblocks pushed to us, and the relay hints from the message pub pushed_microblocks: HashMap, MicroblocksData)>>, + /// all Stacks 3.x blocks pushed to us + pub pushed_nakamoto_blocks: HashMap, NakamotoBlocksData)>>, /// transactions sent to us by the http server pub uploaded_transactions: Vec, /// blocks sent to us via the http server pub uploaded_blocks: Vec, + /// blocks sent to us via the http server + pub uploaded_nakamoto_blocks: Vec, /// microblocks sent to us by the http server pub uploaded_microblocks: Vec, /// chunks we received from the HTTP server @@ -1460,9 +1492,11 @@ pub struct NetworkResult { pub num_inv_sync_passes: u64, /// Number of times the Stacks 2.x block downloader has completed one pass pub num_download_passes: u64, + /// Number of connected peers + pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, - /// The consensus hash of the start of this reward cycle + /// The consensus hash of the burnchain tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, @@ -1470,14 +1504,17 @@ pub struct NetworkResult { impl NetworkResult { pub fn new( + stacks_tip: StacksBlockId, num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, + num_connected_peers: usize, burn_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, ) -> NetworkResult { NetworkResult { + stacks_tip, unhandled_messages: HashMap::new(), download_pox_id: None, blocks: vec![], @@ -1486,7 +1523,9 @@ impl NetworkResult { pushed_transactions: HashMap::new(), pushed_blocks: HashMap::new(), pushed_microblocks: HashMap::new(), + pushed_nakamoto_blocks: HashMap::new(), uploaded_transactions: vec![], + uploaded_nakamoto_blocks: vec![], uploaded_blocks: vec![], uploaded_microblocks: vec![], uploaded_stackerdb_chunks: vec![], @@ -1496,6 +1535,7 @@ impl NetworkResult { num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: num_download_passes, + num_connected_peers, burn_height, rc_consensus_hash, stacker_db_configs, @@ -1513,7 +1553,7 @@ impl NetworkResult { } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 + self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { @@ -1555,7 +1595,7 @@ impl NetworkResult { pub fn consume_unsolicited( &mut self, unhandled_messages: HashMap>, - ) -> () { + ) { for (neighbor_key, messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { match message.payload { @@ -1585,6 +1625,16 @@ impl NetworkResult { .insert(neighbor_key.clone(), 
vec![(message.relayers, tx_data)]); } } + StacksMessageType::NakamotoBlocks(block_data) => { + if let Some(nakamoto_blocks_msgs) = + self.pushed_nakamoto_blocks.get_mut(&neighbor_key) + { + nakamoto_blocks_msgs.push((message.relayers, block_data)); + } else { + self.pushed_nakamoto_blocks + .insert(neighbor_key.clone(), vec![(message.relayers, block_data)]); + } + } _ => { // forward along if let Some(messages) = self.unhandled_messages.get_mut(&neighbor_key) { @@ -1599,8 +1649,8 @@ impl NetworkResult { } } - pub fn consume_http_uploads(&mut self, mut msgs: Vec) -> () { - for msg in msgs.drain(..) { + pub fn consume_http_uploads(&mut self, msgs: Vec) -> () { + for msg in msgs.into_iter() { match msg { StacksMessageType::Transaction(tx_data) => { self.uploaded_transactions.push(tx_data); @@ -1614,6 +1664,9 @@ impl NetworkResult { StacksMessageType::StackerDBPushChunk(chunk_data) => { self.uploaded_stackerdb_chunks.push(chunk_data); } + StacksMessageType::NakamotoBlocks(data) => { + self.uploaded_nakamoto_blocks.extend(data.blocks); + } _ => { // drop warn!("Dropping unknown HTTP message"); @@ -1656,13 +1709,13 @@ pub mod test { use std::{fs, io, thread}; use clarity::boot_util::boot_code_id; + use clarity::types::sqlite::NO_PARAMS; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; use clarity::vm::types::*; use clarity::vm::ClarityVersion; use rand::{Rng, RngCore}; - use rusqlite::NO_PARAMS; use stacks_common::address::*; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; @@ -2021,6 +2074,7 @@ pub mod test { /// What services should this peer support? pub services: u16, /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) pub aggregate_public_key: Option, pub test_stackers: Option>, pub test_signers: Option, @@ -2197,6 +2251,8 @@ pub mod test { (), BitcoinIndexer, >, + /// list of malleablized blocks produced when mining. 
+ pub malleablized_blocks: Vec, } impl<'a> TestPeer<'a> { @@ -2540,6 +2596,7 @@ pub mod test { &mut stacks_node.chainstate, &sortdb, old_stackerdb_configs, + config.connection_opts.num_neighbors, ) .expect("Failed to refresh stackerdb configs"); @@ -2609,6 +2666,7 @@ pub mod test { chainstate_path: chainstate_path, coord: coord, indexer: Some(indexer), + malleablized_blocks: vec![], } } @@ -2641,6 +2699,26 @@ pub mod test { &self.network.local_peer } + pub fn add_neighbor( + &mut self, + n: &mut Neighbor, + stacker_dbs: Option<&[QualifiedContractIdentifier]>, + bootstrap: bool, + ) { + let mut tx = self.network.peerdb.tx_begin().unwrap(); + n.save(&mut tx, stacker_dbs).unwrap(); + if bootstrap { + PeerDB::set_initial_peer( + &tx, + self.config.network_id, + &n.addr.addrbytes, + n.addr.port, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + // TODO: DRY up from PoxSyncWatchdog pub fn infer_initial_burnchain_block_download( burnchain: &Burnchain, @@ -2701,6 +2779,8 @@ pub mod test { let mut mempool = self.mempool.take().unwrap(); let indexer = self.indexer.take().unwrap(); + let old_tip = self.network.stacks_tip.clone(); + let ret = self.network.run( &indexer, &mut sortdb, @@ -2725,8 +2805,8 @@ pub mod test { &mut self, ibd: bool, dns_client: Option<&mut DNSClient>, - ) -> Result { - let mut net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; + ) -> Result<(NetworkResult, ProcessedNetReceipts), net_error> { + let net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; let mut sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); @@ -2734,7 +2814,8 @@ pub mod test { let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), - &mut net_result, + &mut net_result.clone(), + &self.network.burnchain, &mut sortdb, &mut stacks_node.chainstate, &mut mempool, @@ -2752,7 +2833,7 @@ pub mod test { self.coord.handle_new_stacks_block().unwrap(); self.coord.handle_new_nakamoto_stacks_block().unwrap(); - receipts_res + receipts_res.and_then(|receipts| Ok((net_result, receipts))) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { @@ -2778,6 +2859,8 @@ pub mod test { ); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let old_tip = self.network.stacks_tip.clone(); + let ret = self.network.run( &indexer, &mut sortdb, @@ -2801,6 +2884,9 @@ pub mod test { let sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + + let old_tip = self.network.stacks_tip.clone(); + self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); @@ -2849,7 +2935,15 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true); + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_diverge( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, true); (x.0, x.1, x.2) } @@ -2862,14 +2956,14 @@ pub mod test { ConsensusHash, Option, ) { - self.inner_next_burnchain_block(blockstack_ops, true, true, true) + self.inner_next_burnchain_block(blockstack_ops, true, true, true, false) } pub fn 
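Two small observations on the reworked `step_with_ibd_and_dns` return path above: the relayer is handed `&mut net_result.clone()`, so any mutation the relayer performs happens on a throwaway copy while the original `NetworkResult` is returned to the caller (this is why the struct gained `#[derive(Clone)]`), and the closing `and_then(|receipts| Ok(..))` never uses the error path, so it is equivalent to `map`. A tiny self-contained demonstration of that equivalence:

    fn main() {
        let receipts_res: Result<u32, &str> = Ok(7);
        let net_result = "net";
        // and_then(|r| Ok(..)) on a Result is just map:
        let a: Result<(&str, u32), &str> = receipts_res.and_then(|r| Ok((net_result, r)));
        let b: Result<(&str, u32), &str> = receipts_res.map(|r| (net_result, r));
        assert_eq!(a, b);
    }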
next_burnchain_block_raw( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true); + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true, false); (x.0, x.1, x.2) } @@ -2877,7 +2971,7 @@ pub mod test { &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false); + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false, false); (x.0, x.1, x.2) } @@ -2890,7 +2984,7 @@ pub mod test { ConsensusHash, Option, ) { - self.inner_next_burnchain_block(blockstack_ops, false, false, true) + self.inner_next_burnchain_block(blockstack_ops, false, false, true, false) } pub fn set_ops_consensus_hash( @@ -2921,6 +3015,7 @@ pub mod test { tip_block_height: u64, tip_block_hash: &BurnchainHeaderHash, num_ops: u64, + ops_determine_block_header: bool, ) -> BurnchainBlockHeader { test_debug!( "make_next_burnchain_block: tip_block_height={} tip_block_hash={} num_ops={}", @@ -2939,8 +3034,16 @@ pub mod test { let now = BURNCHAIN_TEST_BLOCK_TIME; let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( - &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32) - .bitcoin_hash(), + &BitcoinIndexer::mock_bitcoin_header( + &parent_hdr.block_hash, + (now as u32) + + if ops_determine_block_header { + num_ops as u32 + } else { + 0 + }, + ) + .bitcoin_hash(), ); test_debug!( "Block header hash at {} is {}", @@ -3012,6 +3115,7 @@ pub mod test { set_consensus_hash: bool, set_burn_hash: bool, update_burnchain: bool, + ops_determine_block_header: bool, ) -> ( u64, BurnchainHeaderHash, @@ -3035,6 +3139,7 @@ pub mod test { tip.block_height, &tip.burn_header_hash, blockstack_ops.len() as u64, + ops_determine_block_header, ); if set_burn_hash { @@ -3344,6 +3449,14 @@ pub mod test { self.next_burnchain_block(vec![]) } + pub fn mine_empty_tenure(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let (burn_ops, ..) 
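The new `ops_determine_block_header` flag above lets a test force sibling burnchain blocks to diverge: folding the operation count into the mock header's timestamp means two blocks with the same parent but different op counts hash differently. A sketch of just that arithmetic (the function name is illustrative):

    fn mock_header_time(base: u32, num_ops: u32, ops_determine_block_header: bool) -> u32 {
        // Same parent + different op counts => different timestamp => different header hash.
        base + if ops_determine_block_header { num_ops } else { 0 }
    }

    fn main() {
        assert_ne!(mock_header_time(1_000, 3, true), mock_header_time(1_000, 4, true));
        assert_eq!(mock_header_time(1_000, 3, false), mock_header_time(1_000, 4, false));
    }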
= self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let result = self.next_burnchain_block(burn_ops); + // remove the last block commit so that the testpeer doesn't try to build off of this tenure + self.miner.block_commits.pop(); + result + } + pub fn mempool(&mut self) -> &mut MemPoolDB { self.mempool.as_mut().unwrap() } @@ -3352,6 +3465,10 @@ pub mod test { &mut self.stacks_node.as_mut().unwrap().chainstate } + pub fn chainstate_ref(&self) -> &StacksChainState { + &self.stacks_node.as_ref().unwrap().chainstate + } + pub fn sortdb(&mut self) -> &mut SortitionDB { self.sortdb.as_mut().unwrap() } @@ -3467,6 +3584,7 @@ pub mod test { SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.as_ref().unwrap().conn()) .unwrap(); let burnchain = self.config.burnchain.clone(); + let (burn_ops, stacks_block, microblocks) = self.make_tenure( |ref mut miner, ref mut sortdb, @@ -3492,7 +3610,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle(&tip.sortition_id), block_txs, ) .unwrap(); @@ -3517,6 +3635,14 @@ pub mod test { } self.refresh_burnchain_view(); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + assert_eq!( + self.network.stacks_tip.block_id(), + StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh) + ); + tip_id } @@ -3703,7 +3829,7 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) @@ -3844,29 +3970,12 @@ pub mod test { } /// Verify that the sortition DB migration into Nakamoto worked correctly. - /// For now, it's sufficient to check that the `get_last_processed_reward_cycle()` calculation - /// works the same across both the original and migration-compatible implementations. 
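`mine_empty_tenure` above mines the tenure's burnchain block but then pops the miner's last block-commit, so later test tenures do not try to build on the empty one. A reduced sketch of that bookkeeping, with simplified types:

    struct TestMiner {
        block_commits: Vec<u64>, // stand-in for the real block-commit records
    }

    impl TestMiner {
        fn mine_empty_tenure(&mut self) {
            self.block_commits.push(42); // commit recorded as a side effect of mining
            self.block_commits.pop();    // popped so no descendant tenure builds on it
        }
    }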
pub fn check_nakamoto_migration(&mut self) { let mut sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let chainstate = &mut node.chainstate; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - for height in 0..=tip.block_height { - let sns = - SortitionDB::get_all_snapshots_by_burn_height(sortdb.conn(), height).unwrap(); - for sn in sns { - let ih = sortdb.index_handle(&sn.sortition_id); - let highest_processed_rc = ih.get_last_processed_reward_cycle().unwrap(); - let expected_highest_processed_rc = - ih.legacy_get_last_processed_reward_cycle().unwrap(); - assert_eq!( - highest_processed_rc, expected_highest_processed_rc, - "BUG: at burn height {} the highest-processed reward cycles diverge", - height - ); - } - } let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); let epoch_3_idx = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); @@ -4022,6 +4131,37 @@ pub mod test { self.sortdb = Some(sortdb); self.stacks_node = Some(node); } + + /// Verify that all malleablized blocks are duly processed + pub fn check_malleablized_blocks( + &self, + all_blocks: Vec, + expected_siblings: usize, + ) { + for block in all_blocks.iter() { + let sighash = block.header.signer_signature_hash(); + let siblings = self + .chainstate_ref() + .nakamoto_blocks_db() + .get_blocks_at_height(block.header.chain_length); + + debug!("Expect {} siblings: {:?}", expected_siblings, &siblings); + assert_eq!(siblings.len(), expected_siblings); + + for sibling in siblings { + let (processed, orphaned) = NakamotoChainState::get_nakamoto_block_status( + self.chainstate_ref().nakamoto_blocks_db(), + self.chainstate_ref().db(), + &sibling.header.consensus_hash, + &sibling.header.block_hash(), + ) + .unwrap() + .unwrap(); + assert!(processed); + assert!(!orphaned); + } + } + } } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index c819ac049b4..31c62a1f8f0 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -403,6 +403,17 @@ pub trait NeighborComms { convo.is_authenticated() && convo.peer_version > 0 } + /// Are we in the process of connecting to a neighbor? + fn is_neighbor_connecting(&self, network: &PeerNetwork, nk: &NK) -> bool { + if network.is_connecting_neighbor(&nk.to_neighbor_key(network)) { + return true; + } + let Some(event_id) = self.get_connecting(network, nk) else { + return false; + }; + network.is_connecting(event_id) + } + /// Reset all comms fn reset(&mut self) { let _ = self.take_broken_neighbors(); diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 276d04124e9..7e01a0c448f 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -305,16 +305,13 @@ impl PeerNetwork { // time to do a walk yet? if (self.walk_count > self.connection_opts.num_initial_walks || self.walk_retries > self.connection_opts.walk_retry_count) - && self.walk_deadline > get_epoch_time_secs() + && (!ibd && self.walk_deadline > get_epoch_time_secs()) { // we've done enough walks for an initial mixing, or we can't connect to anyone, // so throttle ourselves down until the walk deadline passes. 
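Two behavioral tweaks in the hunks above are easy to miss: `is_neighbor_connecting` now also consults the network's own connecting table (not just this state machine's pending handle), and the walk throttle no longer applies during initial block download, so a syncing node keeps walking for peers. The revised throttle predicate, extracted as a sketch:

    fn should_throttle_walk(enough_walks: bool, too_many_retries: bool, ibd: bool, deadline: u64, now: u64) -> bool {
        // Pre-change: (enough_walks || too_many_retries) && deadline > now.
        // Post-change: the deadline only throttles outside of IBD.
        (enough_walks || too_many_retries) && (!ibd && deadline > now)
    }

    fn main() {
        assert!(!should_throttle_walk(true, false, true, u64::MAX, 0)); // IBD: never throttle
        assert!(should_throttle_walk(true, false, false, 100, 50));     // steady state: throttle until deadline
    }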
- test_debug!( + debug!( "{:?}: Throttle walk until {} to walk again (walk count: {}, walk retries: {})", - &self.local_peer, - self.walk_deadline, - self.walk_count, - self.walk_retries + &self.local_peer, self.walk_deadline, self.walk_count, self.walk_retries ); return false; } diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs index 3a5378803f5..c75074222d8 100644 --- a/stackslib/src/net/neighbors/rpc.rs +++ b/stackslib/src/net/neighbors/rpc.rs @@ -245,11 +245,13 @@ impl NeighborRPC { // see if we got any data let Some(http_response) = convo.try_get_response() else { - // still waiting - debug!( - "{:?}: HTTP event {} is still waiting for a response", - &network.local_peer, event_id - ); + if !convo.is_idle() { + // still waiting + debug!( + "{:?}: HTTP event {} is still waiting for a response", + &network.local_peer, event_id + ); + } return Ok(None); }; diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 8a0e370ba8f..478f5c0e3df 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -1012,7 +1012,7 @@ impl NeighborWalk { continue; } Err(e) => { - info!( + debug!( "{:?}: Failed to connect to {:?}: {:?}", network.get_local_peer(), &nk, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index b60146dff31..861a6e6cfab 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -33,22 +33,22 @@ use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_H use stacks_common::types::chainstate::{PoxId, SortitionId}; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use {mio, url}; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Address, Burnchain, BurnchainView, PublicKey}; -use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{ static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, + static_get_stacks_tip_affirmation_map, OnChainRewardSetProvider, RewardCycleInfo, }; -use crate::chainstate::stacks::boot::MINERS_NAME; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::{StacksBlockHeader, MAX_BLOCK_LEN, MAX_TRANSACTION_LEN}; use crate::core::StacksEpoch; use crate::monitoring::{update_inbound_neighbors, update_outbound_neighbors}; @@ -63,6 +63,7 @@ use crate::net::http::HttpRequestContents; use crate::net::httpcore::StacksHttpRequest; use crate::net::inv::inv2x::*; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine}; +use crate::net::mempool::MempoolSync; use crate::net::neighbors::*; use crate::net::poll::{NetworkPollState, NetworkState}; use crate::net::prune::*; @@ -90,6 +91,7 @@ pub enum NetworkRequest { /// The "main loop" for sending/receiving data is a select/poll 
loop, and runs outside of other /// threads that need a synchronous RPC or a multi-RPC interface. This object gives those threads /// a way to issue commands and hear back replies from them. +#[derive(Clone)] pub struct NetworkHandle { chan_in: SyncSender, } @@ -193,22 +195,74 @@ pub enum PeerNetworkWorkState { Prune, } -/// The four states the mempool sync state machine can be in -#[derive(Debug, Clone, PartialEq)] -pub enum MempoolSyncState { - /// Picking an outbound peer - PickOutboundPeer, - /// Resolving its data URL to a SocketAddr. Contains the data URL, DNS request handle, and - /// mempool page ID - ResolveURL(UrlString, DNSRequest, Txid), - /// Sending the request for mempool transactions. Contains the data URL, resolved socket, and - /// mempool page. - SendQuery(UrlString, SocketAddr, Txid), - /// Receiving the mempool response. Contains the URL, socket address, and event ID - RecvResponse(UrlString, SocketAddr, usize), +pub type PeerMap = HashMap; +pub type PendingMessages = HashMap>; + +pub struct ConnectingPeer { + socket: mio_net::TcpStream, + outbound: bool, + timestamp: u64, + nk: NeighborKey, } -pub type PeerMap = HashMap; +impl ConnectingPeer { + pub fn new( + socket: mio_net::TcpStream, + outbound: bool, + timestamp: u64, + nk: NeighborKey, + ) -> Self { + Self { + socket, + outbound, + timestamp, + nk, + } + } +} + +/// Cached reward cycle, for validating pushed blocks +#[derive(Clone, Debug, PartialEq)] +pub struct CurrentRewardSet { + pub reward_cycle: u64, + pub reward_cycle_info: RewardCycleInfo, + pub anchor_block_consensus_hash: ConsensusHash, + pub anchor_block_hash: BlockHeaderHash, +} + +impl CurrentRewardSet { + pub fn reward_set(&self) -> Option<&RewardSet> { + self.reward_cycle_info.known_selected_anchor_block() + } + + pub fn anchor_block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.anchor_block_consensus_hash, &self.anchor_block_hash) + } +} + +/// Cached stacks chain tip info, consumed by RPC endpoints +#[derive(Clone, Debug, PartialEq)] +pub struct StacksTipInfo { + pub consensus_hash: ConsensusHash, + pub block_hash: BlockHeaderHash, + pub height: u64, + pub is_nakamoto: bool, +} + +impl StacksTipInfo { + pub fn empty() -> Self { + Self { + consensus_hash: ConsensusHash([0u8; 20]), + block_hash: BlockHeaderHash([0u8; 32]), + height: 0, + is_nakamoto: false, + } + } + + pub fn block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.consensus_hash, &self.block_hash) + } +} pub struct PeerNetwork { // constants @@ -225,24 +279,18 @@ pub struct PeerNetwork { pub ast_rules: ASTRules, /// Current Stacks tip -- the highest block's consensus hash, block hash, and height - pub stacks_tip: (ConsensusHash, BlockHeaderHash, u64), - /// Sortition that corresponds to the current Stacks tip, if known - pub stacks_tip_sn: Option, + pub stacks_tip: StacksTipInfo, /// Parent tenure Stacks tip -- the last block in the current tip's parent tenure. /// In epoch 2.x, this is the parent block. /// In nakamoto, this is the last block in the parent tenure - pub parent_stacks_tip: (ConsensusHash, BlockHeaderHash, u64), + pub parent_stacks_tip: StacksTipInfo, /// The block id of the first block in this tenure. /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure pub tenure_start_block_id: StacksBlockId, - /// The aggregate public keys of each witnessed reward cycle. - /// Only active during epoch 3.x and beyond. 
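Replacing the anonymous `(socket, outbound, timestamp)` tuple with the named `ConnectingPeer` struct, and recording the `NeighborKey`, lets the timeout path (`disconnect_unresponsive`, further down) log which neighbor went silent. A stand-in sketch of the record and the timeout test it supports:

    struct ConnectingPeer<S> {
        socket: S,        // stand-in for mio_net::TcpStream
        outbound: bool,
        timestamp: u64,   // when the connection attempt started
        neighbor: String, // stand-in for NeighborKey; lets timeout logs name the peer
    }

    impl<S> ConnectingPeer<S> {
        fn timed_out(&self, connect_timeout: u64, now: u64) -> bool {
            self.timestamp + connect_timeout < now
        }
    }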
- /// Gets refreshed on each new Stacks block arrival, which deals with burnchain forks. - /// Stored in a BTreeMap because we often need to query the last or second-to-last reward cycle - /// aggregate public key, and we need to determine whether or not to load new reward cycles' - /// keys. - pub aggregate_public_keys: BTreeMap>, + /// The reward sets of the past three reward cycles. + /// Needed to validate blocks, which are signed by a threshold of stackers + pub current_reward_sets: BTreeMap, // information about the state of the network's anchor blocks pub heaviest_affirmation_map: AffirmationMap, @@ -260,7 +308,7 @@ pub struct PeerNetwork { pub peers: PeerMap, pub sockets: HashMap, pub events: HashMap, - pub connecting: HashMap, // (socket, outbound?, connection sent timestamp) + pub connecting: HashMap, pub bans: HashSet, // ongoing messages the network is sending via the p2p interface @@ -284,7 +332,10 @@ pub struct PeerNetwork { // work state -- we can be walking, fetching block inventories, fetching blocks, pruning, etc. pub work_state: PeerNetworkWorkState, pub nakamoto_work_state: PeerNetworkWorkState, - have_data_to_download: bool, + pub(crate) have_data_to_download: bool, + + /// Mempool sync machine + pub mempool_sync: Option, // neighbor walk state pub walk: Option>, @@ -327,15 +378,6 @@ pub struct PeerNetwork { // handle to all stacker DB state pub stackerdbs: StackerDBs, - // outstanding request to perform a mempool sync - // * mempool_sync_deadline is when the next mempool sync must start - // * mempool_sync_timeout is when the current mempool sync must stop - mempool_state: MempoolSyncState, - mempool_sync_deadline: u64, - mempool_sync_timeout: u64, - mempool_sync_completions: u64, - mempool_sync_txs: u64, - // how often we pruned a given inbound/outbound peer pub prune_outbound_counts: HashMap, pub prune_inbound_counts: HashMap, @@ -372,9 +414,10 @@ pub struct PeerNetwork { antientropy_start_reward_cycle: u64, pub antientropy_last_push_ts: u64, - // pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks) that we - // can't process yet, but might be able to process on the next chain view update - pub pending_messages: HashMap>, + /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, + /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent + /// chain view update. 
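The `aggregate_public_keys` map (a WSTS-era artifact) gives way to `current_reward_sets` here: Nakamoto blocks are signed by a threshold of the cycle's stackers, so validation needs the full signing set for the block's cycle, not a single aggregate key. A sketch of the cache shape and lookup, with a stand-in reward-set type:

    use std::collections::BTreeMap;

    struct RewardSetStub {
        signers: Vec<[u8; 33]>, // stand-in for the cycle's signer public keys
    }

    // Keyed by reward cycle; the network keeps the current and two prior cycles.
    fn signers_for_cycle(cache: &BTreeMap<u64, RewardSetStub>, rc: u64) -> Option<&[[u8; 33]]> {
        cache.get(&rc).map(|set| set.signers.as_slice())
    }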
+ pub pending_messages: PendingMessages, // fault injection -- force disconnects fault_last_disconnect: u64, @@ -449,11 +492,10 @@ impl PeerNetwork { &first_burn_header_hash, first_burn_header_ts as u64, ), - stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), - stacks_tip_sn: None, - parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), + stacks_tip: StacksTipInfo::empty(), + parent_stacks_tip: StacksTipInfo::empty(), tenure_start_block_id: StacksBlockId([0x00; 32]), - aggregate_public_keys: BTreeMap::new(), + current_reward_sets: BTreeMap::new(), peerdb: peerdb, atlasdb: atlasdb, @@ -479,6 +521,8 @@ impl PeerNetwork { nakamoto_work_state: PeerNetworkWorkState::GetPublicIP, have_data_to_download: false, + mempool_sync: Some(MempoolSync::new()), + walk: None, walk_deadline: 0, walk_attempts: 0, @@ -503,12 +547,6 @@ impl PeerNetwork { stacker_db_configs: stacker_db_configs, stackerdbs: stackerdbs, - mempool_state: MempoolSyncState::PickOutboundPeer, - mempool_sync_deadline: 0, - mempool_sync_timeout: 0, - mempool_sync_completions: 0, - mempool_sync_txs: 0, - prune_outbound_counts: HashMap::new(), prune_inbound_counts: HashMap::new(), @@ -536,7 +574,7 @@ impl PeerNetwork { antientropy_last_push_ts: 0, antientropy_start_reward_cycle: 0, - pending_messages: HashMap::new(), + pending_messages: PendingMessages::new(), fault_last_disconnect: 0, @@ -646,6 +684,24 @@ impl PeerNetwork { Ok(()) } + /// Call `bind()` only if not already bound + /// Returns: + /// - `Ok(true)` if `bind()` call was successful + /// - `Ok(false)` if `bind()` call was skipped + /// - `Err()` if `bind()` failed + #[cfg_attr(test, mutants::skip)] + pub fn try_bind( + &mut self, + my_addr: &SocketAddr, + http_addr: &SocketAddr, + ) -> Result<bool, net_error> { + if self.network.is_some() { + // Already bound + return Ok(false); + } + self.bind(my_addr, http_addr).map(|()| true) + } + /// Get bound neighbor key. This is how this PeerNetwork appears to other nodes. pub fn bound_neighbor_key(&self) -> &NeighborKey { &self.bind_nk } @@ -960,13 +1016,15 @@ impl PeerNetwork { })? } - /// Broadcast a message to a list of neighbors + /// Broadcast a message to a list of neighbors. + /// Neighbors in the `relay_hints` vec will *not* receive data, since they were the one(s) that + /// sent this peer the message in the first place. pub fn broadcast_message( &mut self, mut neighbor_keys: Vec<NeighborKey>, relay_hints: Vec<RelayData>, message_payload: StacksMessageType, - ) -> () { + ) { debug!( "{:?}: Will broadcast '{}' to up to {} neighbors; relayed by {:?}", &self.local_peer, @@ -1157,8 +1215,10 @@ impl PeerNetwork { let registered_event_id = network.register(self.p2p_network_handle, hint_event_id, &sock)?; - self.connecting .insert(registered_event_id, (sock, true, get_epoch_time_secs())); + self.connecting.insert( + registered_event_id, + ConnectingPeer::new(sock, true, get_epoch_time_secs(), neighbor.clone()), + ); registered_event_id } }; @@ -1287,12 +1347,13 @@ impl PeerNetwork { Ok(ret) } + #[cfg_attr(test, mutants::skip)] /// Dispatch a single request from another thread.
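`try_bind` above gives callers an idempotent bind: re-running startup logic will not clobber sockets that already exist. A sketch of the same contract with stand-in types (the real version checks `self.network.is_some()`):

    struct Net {
        network: Option<()>, // stand-in for the bound socket state
    }

    impl Net {
        fn bind(&mut self) -> Result<(), String> {
            self.network = Some(());
            Ok(())
        }

        /// Ok(true): bound now. Ok(false): already bound, call skipped. Err: bind failed.
        fn try_bind(&mut self) -> Result<bool, String> {
            if self.network.is_some() {
                return Ok(false);
            }
            self.bind().map(|()| true)
        }
    }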
pub fn dispatch_request(&mut self, request: NetworkRequest) -> Result<(), net_error> { match request { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { - debug!("Request to ban {:?}", neighbor_key); + info!("Request to ban {:?}", neighbor_key); match self.events.get(neighbor_key) { Some(event_id) => { debug!("Will ban {:?} (event {})", neighbor_key, event_id); @@ -1344,6 +1405,17 @@ impl PeerNetwork { } Ok(all_neighbors.into_iter().collect()) } + StacksMessageType::NakamotoBlocks(ref data) => { + // send to each neighbor that needs one + let mut all_neighbors = HashSet::new(); + for nakamoto_block in data.blocks.iter() { + let neighbors = + self.sample_broadcast_peers(&relay_hints, nakamoto_block)?; + + all_neighbors.extend(neighbors); + } + Ok(all_neighbors.into_iter().collect()) + } StacksMessageType::Transaction(ref data) => { self.sample_broadcast_peers(&relay_hints, data) } @@ -1554,6 +1626,14 @@ impl PeerNetwork { self.connecting.contains_key(&event_id) } + /// Is a neighbor connecting on any event? + pub fn is_connecting_neighbor(&self, nk: &NeighborKey) -> bool { + self.connecting + .iter() + .find(|(_, peer)| peer.nk == *nk) + .is_some() + } + /// Is this neighbor key the same as the one that represents our p2p bind address? pub fn is_bound(&self, neighbor_key: &NeighborKey) -> bool { self.bind_nk.network_id == neighbor_key.network_id @@ -1829,7 +1909,7 @@ impl PeerNetwork { let _ = network.deregister(event_id, &socket); } // deregister socket if still connecting - if let Some((socket, ..)) = self.connecting.remove(&event_id) { + if let Some(ConnectingPeer { socket, .. }) = self.connecting.remove(&event_id) { let _ = network.deregister(event_id, &socket); } } @@ -2089,7 +2169,9 @@ impl PeerNetwork { fn process_connecting_sockets(&mut self, poll_state: &mut NetworkPollState) { for event_id in poll_state.ready.iter() { if self.connecting.contains_key(event_id) { - let (socket, outbound, _) = self.connecting.remove(event_id).unwrap(); + let ConnectingPeer { + socket, outbound, .. + } = self.connecting.remove(event_id).unwrap(); let sock_str = format!("{:?}", &socket); if let Err(_e) = self.register_peer(*event_id, socket, outbound) { debug!( @@ -2241,9 +2323,18 @@ impl PeerNetwork { fn disconnect_unresponsive(&mut self) -> usize { let now = get_epoch_time_secs(); let mut to_remove = vec![]; - for (event_id, (socket, _, ts)) in self.connecting.iter() { - if ts + self.connection_opts.connect_timeout < now { - debug!("{:?}: Disconnect unresponsive connecting peer {:?} (event {}): timed out after {} ({} < {})s", &self.local_peer, socket, event_id, self.connection_opts.timeout, ts + self.connection_opts.timeout, now); + for (event_id, peer) in self.connecting.iter() { + if peer.timestamp + self.connection_opts.connect_timeout < now { + debug!( + "{:?}: Disconnect unresponsive connecting peer {:?} (event {} neighbor {}): timed out after {} ({} < {})s", + &self.local_peer, + &peer.socket, + event_id, + &peer.nk, + self.connection_opts.timeout, + peer.timestamp + self.connection_opts.timeout, + now + ); to_remove.push(*event_id); } } @@ -2509,55 +2600,6 @@ impl PeerNetwork { done } - /// Do a mempool sync. Return any transactions we might receive. 
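A micro-nit on the new `is_connecting_neighbor` scan above: `iter().find(..).is_some()` is equivalent to `iter().any(..)`, which is what clippy's `search_is_some` lint suggests. A self-contained demonstration with stand-in data:

    fn main() {
        let connecting: Vec<(usize, &str)> = vec![(1, "peer-a"), (2, "peer-b")];
        let nk = "peer-b";
        // find(..).is_some() and any(..) agree; any() reads more directly.
        assert_eq!(
            connecting.iter().find(|(_, peer)| *peer == nk).is_some(),
            connecting.iter().any(|(_, peer)| *peer == nk)
        );
    }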
- #[cfg_attr(test, mutants::skip)] - fn do_network_mempool_sync( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - mempool: &MemPoolDB, - ibd: bool, - ) -> Option> { - if ibd { - return None; - } - - return match self.do_mempool_sync(dns_client_opt, mempool) { - (true, txs_opt) => { - // did we run to completion? - if let Some(txs) = txs_opt { - debug!( - "{:?}: Mempool sync obtained {} transactions from mempool sync, and done receiving", - &self.local_peer, - txs.len() - ); - - self.mempool_sync_deadline = - get_epoch_time_secs() + self.connection_opts.mempool_sync_interval; - self.mempool_sync_completions = self.mempool_sync_completions.saturating_add(1); - self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); - Some(txs) - } else { - None - } - } - (false, txs_opt) => { - // did we get some transactions, but have more to get? - if let Some(txs) = txs_opt { - debug!( - "{:?}: Mempool sync obtained {} transactions from mempool sync, but have more", - &self.local_peer, - txs.len() - ); - - self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); - Some(txs) - } else { - None - } - } - }; - } - /// Begin the process of learning this peer's public IP address. /// Return Ok(finished with this step) /// Return Err(..) on failure @@ -3487,435 +3529,6 @@ impl PeerNetwork { } } - /// Reset a mempool sync - fn mempool_sync_reset(&mut self) { - self.mempool_state = MempoolSyncState::PickOutboundPeer; - self.mempool_sync_timeout = 0; - } - - /// Pick a peer to mempool sync with. - /// Returns Ok(None) if we're done syncing the mempool. - /// Returns Ok(Some(..)) if we're not done, and can proceed - /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, - /// or SendQuery if we got the IP address and can just issue the query. - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_pick_outbound_peer( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - page_id: &Txid, - ) -> Result, net_error> { - if self.peers.len() == 0 { - debug!("No peers connected; cannot do mempool sync"); - return Ok(None); - } - - let mut idx = thread_rng().gen::() % self.peers.len(); - let mut mempool_sync_data_url = None; - for _ in 0..self.peers.len() + 1 { - let event_id = match self.peers.keys().skip(idx).next() { - Some(eid) => *eid, - None => { - idx = 0; - continue; - } - }; - idx = (idx + 1) % self.peers.len(); - - if let Some(convo) = self.peers.get(&event_id) { - if !convo.is_authenticated() || !convo.is_outbound() { - continue; - } - if !ConversationP2P::supports_mempool_query(convo.peer_services) { - continue; - } - if convo.data_url.len() == 0 { - continue; - } - let url = convo.data_url.clone(); - if dns_client_opt.is_none() { - if let Ok(Some(_)) = PeerNetwork::try_get_url_ip(&url) { - } else { - // need a DNS client for this one - continue; - } - } - - mempool_sync_data_url = Some(url); - break; - } - } - - if let Some(url) = mempool_sync_data_url { - self.mempool_sync_begin_resolve_data_url(url, dns_client_opt, page_id) - } else { - debug!("No peer has a data URL, so no mempool sync can happen"); - Ok(None) - } - } - - /// Begin resolving the DNS host of a data URL for mempool sync. - /// Returns Ok(None) if we're done syncing the mempool. - /// Returns Ok(Some(..)) if we're not done, and can proceed - /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, - /// or SendQuery if we got the IP address and can just issue the query. 
- #[cfg_attr(test, mutants::skip)] - fn mempool_sync_begin_resolve_data_url( - &self, - url_str: UrlString, - dns_client_opt: &mut Option<&mut DNSClient>, - page_id: &Txid, - ) -> Result, net_error> { - // start resolving - let url = url_str.parse_to_block_url()?; - let port = match url.port_or_known_default() { - Some(p) => p, - None => { - warn!("Unsupported URL {:?}: unknown port", &url); - return Ok(None); - } - }; - - // bare IP address? - if let Some(addr) = PeerNetwork::try_get_url_ip(&url_str)? { - return Ok(Some(MempoolSyncState::SendQuery( - url_str, - addr, - page_id.clone(), - ))); - } else if let Some(url::Host::Domain(domain)) = url.host() { - if let Some(ref mut dns_client) = dns_client_opt { - // begin DNS query - match dns_client.queue_lookup( - domain, - port, - get_epoch_time_ms() + self.connection_opts.dns_timeout, - ) { - Ok(_) => {} - Err(_) => { - warn!("Failed to queue DNS lookup on {}", &url_str); - return Ok(None); - } - } - return Ok(Some(MempoolSyncState::ResolveURL( - url_str, - DNSRequest::new(domain.to_string(), port, 0), - page_id.clone(), - ))); - } else { - // can't proceed -- no DNS client - return Ok(None); - } - } else { - // can't proceed - return Ok(None); - } - } - - /// Resolve our picked mempool sync peer's data URL. - /// Returns Ok(true, ..) if we're done syncing the mempool. - /// Returns Ok(false, ..) if there's more to do - /// Returns the socket addr if we ever succeed in resolving it. - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_resolve_data_url( - &mut self, - url_str: &UrlString, - request: &DNSRequest, - dns_client_opt: &mut Option<&mut DNSClient>, - ) -> Result<(bool, Option), net_error> { - if let Ok(Some(addr)) = PeerNetwork::try_get_url_ip(url_str) { - // URL contains an IP address -- go with that - Ok((false, Some(addr))) - } else if let Some(dns_client) = dns_client_opt { - // keep trying to resolve - match dns_client.poll_lookup(&request.host, request.port) { - Ok(Some(dns_response)) => match dns_response.result { - Ok(mut addrs) => { - if let Some(addr) = addrs.pop() { - // resolved! - return Ok((false, Some(addr))); - } else { - warn!("DNS returned no results for {}", url_str); - return Ok((true, None)); - } - } - Err(msg) => { - warn!("DNS failed to look up {:?}: {}", &url_str, msg); - return Ok((true, None)); - } - }, - Ok(None) => { - // still in-flight - return Ok((false, None)); - } - Err(e) => { - warn!("DNS lookup failed on {:?}: {:?}", url_str, &e); - return Ok((true, None)); - } - } - } else { - // can't do anything - debug!("No DNS client, and URL contains a domain, so no mempool sync can happen"); - return Ok((true, None)); - } - } - - /// Ask the remote peer for its mempool, connecting to it in the process if need be. 
- /// Returns Ok((true, ..)) if we're done mempool syncing - /// Returns Ok((false, ..)) if there's more to do - /// Returns the event ID on success - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_send_query( - &mut self, - url: &UrlString, - addr: &SocketAddr, - mempool: &MemPoolDB, - page_id: Txid, - ) -> Result<(bool, Option), net_error> { - let sync_data = mempool.make_mempool_sync_data()?; - let request = StacksHttpRequest::new_for_peer( - PeerHost::from_socketaddr(addr), - "POST".into(), - "/v2/mempool/query".into(), - HttpRequestContents::new() - .query_arg("page_id".into(), format!("{}", &page_id)) - .payload_stacks(&sync_data), - )?; - - let event_id = self.connect_or_send_http_request(url.clone(), addr.clone(), request)?; - return Ok((false, Some(event_id))); - } - - /// Receive the mempool sync response. - /// Return Ok(true, ..) if we're done with the mempool sync. - /// Return Ok(false, ..) if we have more work to do. - /// Returns the page ID of the next request to make, and the list of transactions we got - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_recv_response( - &mut self, - event_id: usize, - ) -> Result<(bool, Option, Option>), net_error> { - PeerNetwork::with_http(self, |network, http| { - match http.get_conversation(event_id) { - None => { - if http.is_connecting(event_id) { - debug!( - "{:?}: Mempool sync event {} is not connected yet", - &network.local_peer, event_id - ); - return Ok((false, None, None)); - } else { - // conversation died - debug!("{:?}: Mempool sync peer hung up", &network.local_peer); - return Ok((true, None, None)); - } - } - Some(ref mut convo) => { - match convo.try_get_response() { - None => { - // still waiting - debug!( - "{:?}: Mempool sync event {} still waiting for a response", - &network.local_peer, event_id - ); - return Ok((false, None, None)); - } - Some(http_response) => match http_response.decode_mempool_txs_page() { - Ok((txs, page_id_opt)) => { - debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); - return Ok((true, page_id_opt, Some(txs))); - } - Err(e) => { - warn!( - "{:?}: Mempool sync request did not receive a txs page: {:?}", - &network.local_peer, &e - ); - return Ok((true, None, None)); - } - }, - } - } - } - }) - } - - /// Do a mempool sync - /// Return true if we're done and can advance to the next state. - /// Returns the transactions as well if the sync ran to completion. - #[cfg_attr(test, mutants::skip)] - fn do_mempool_sync( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - mempool: &MemPoolDB, - ) -> (bool, Option>) { - if get_epoch_time_secs() <= self.mempool_sync_deadline { - debug!( - "{:?}: Wait until {} to do a mempool sync", - &self.local_peer, self.mempool_sync_deadline - ); - return (true, None); - } - - if self.mempool_sync_timeout == 0 { - // begin new sync - self.mempool_sync_timeout = - get_epoch_time_secs() + self.connection_opts.mempool_sync_timeout; - } else { - if get_epoch_time_secs() > self.mempool_sync_timeout { - debug!( - "{:?}: Mempool sync took too long; terminating", - &self.local_peer - ); - self.mempool_sync_reset(); - return (true, None); - } - } - - // try advancing states until we get blocked. - // Once we get blocked, return. - loop { - let cur_state = self.mempool_state.clone(); - debug!( - "{:?}: Mempool sync state is {:?}", - &self.local_peer, &cur_state - ); - match cur_state { - MempoolSyncState::PickOutboundPeer => { - // 1. pick a random outbound conversation. 
- match self.mempool_sync_pick_outbound_peer(dns_client_opt, &Txid([0u8; 32])) { - Ok(Some(next_state)) => { - // success! can advance to either resolve a URL or to send a query - self.mempool_state = next_state; - } - Ok(None) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // done; need reset - warn!("mempool_sync_pick_outbound_peer returned {:?}", &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::ResolveURL(ref url_str, ref dns_request, ref page_id) => { - // 2. resolve its data URL - match self.mempool_sync_resolve_data_url(url_str, dns_request, dns_client_opt) { - Ok((false, Some(addr))) => { - // success! advance - self.mempool_state = - MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); - } - Ok((false, None)) => { - // try again later - return (false, None); - } - Ok((true, _)) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // failed - warn!( - "mempool_sync_resolve_data_url({}) failed: {:?}", - url_str, &e - ); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { - // 3. ask for the remote peer's mempool's novel txs - debug!( - "{:?}: Mempool sync will query {} for mempool transactions at {}", - &self.local_peer, url, page_id - ); - match self.mempool_sync_send_query(url, addr, mempool, page_id.clone()) { - Ok((false, Some(event_id))) => { - // success! advance - debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &self.local_peer, url, page_id, event_id); - self.mempool_state = - MempoolSyncState::RecvResponse(url.clone(), addr.clone(), event_id); - } - Ok((false, None)) => { - // try again later - return (false, None); - } - Ok((true, _)) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // done - warn!("mempool_sync_send_query({}) returned {:?}", url, &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::RecvResponse(ref url, ref addr, ref event_id) => { - match self.mempool_sync_recv_response(*event_id) { - Ok((true, next_page_id_opt, Some(txs))) => { - debug!( - "{:?}: Mempool sync received {} transactions; next page is {:?}", - &self.local_peer, - txs.len(), - &next_page_id_opt - ); - - // done! got data - let ret = match next_page_id_opt { - Some(next_page_id) => { - // get the next page - self.mempool_state = MempoolSyncState::SendQuery( - url.clone(), - addr.clone(), - next_page_id, - ); - false - } - None => { - // done - self.mempool_sync_reset(); - true - } - }; - return (ret, Some(txs)); - } - Ok((true, _, None)) => { - // done! did not get data - self.mempool_sync_reset(); - return (true, None); - } - Ok((false, _, None)) => { - // still receiving; try again later - return (false, None); - } - Ok((false, _, Some(_))) => { - // should never happen - if cfg!(test) { - panic!("Reached invalid state in {:?}, aborting...", &cur_state); - } - warn!("Reached invalid state in {:?}, resetting...", &cur_state); - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // likely a network error - warn!("mempool_sync_recv_response returned {:?}", &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - } - } - } - /// Do the actual work in the state machine. /// Return true if we need to prune connections. 
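All of the mempool-sync machinery deleted in this stretch (the `PickOutboundPeer -> ResolveURL -> SendQuery -> RecvResponse` loop plus its deadline and completion counters) moves behind the new `MempoolSync` type from `net::mempool`, which `PeerNetwork` now owns as the `mempool_sync` field. The new module is not shown in this diff, so the following is only a hypothetical sketch of the encapsulated state machine, with state names borrowed from the deleted `MempoolSyncState`:

    enum SyncState {
        PickOutboundPeer,
        ResolveUrl(String),
        SendQuery(String),
        RecvResponse(usize),
    }

    struct MempoolSyncSketch {
        state: SyncState,
    }

    impl MempoolSyncSketch {
        /// Advance one step per network pass; yield transactions when a page arrives.
        fn step(&mut self) -> Option<Vec<String>> {
            // The placeholder left by mem::replace doubles as the restart state.
            match std::mem::replace(&mut self.state, SyncState::PickOutboundPeer) {
                SyncState::PickOutboundPeer => self.state = SyncState::ResolveUrl("http://peer.example".into()),
                SyncState::ResolveUrl(url) => self.state = SyncState::SendQuery(url),
                SyncState::SendQuery(_) => self.state = SyncState::RecvResponse(0),
                SyncState::RecvResponse(_event_id) => return Some(Vec::new()),
            }
            None
        }
    }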
/// This will call the epoch-appropriate network worker @@ -3993,7 +3606,7 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, network_result: &mut NetworkResult, ) -> bool { @@ -4366,759 +3979,6 @@ impl PeerNetwork { Some(outbound_neighbor_key) } - /// Update a peer's inventory state to indicate that the given block is available. - /// If updated, return the sortition height of the bit in the inv that was set. - /// Only valid for epoch 2.x - fn handle_unsolicited_inv_update_epoch2x( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - outbound_neighbor_key: &NeighborKey, - consensus_hash: &ConsensusHash, - microblocks: bool, - ) -> Result, net_error> { - let epoch = self.get_current_epoch(); - if epoch.epoch_id >= StacksEpochId::Epoch30 { - info!( - "{:?}: Ban peer event {} for sending an inv 2.x update for {} in epoch 3.x", - event_id, - self.get_local_peer(), - consensus_hash - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - - let block_sortition_height = match self.inv_state { - Some(ref mut inv) => { - let res = if microblocks { - inv.set_microblocks_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - } else { - inv.set_block_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - }; - - match res { - Ok(Some(block_height)) => block_height, - Ok(None) => { - debug!( - "{:?}: We already know the inventory state in {} for {}", - &self.local_peer, outbound_neighbor_key, consensus_hash - ); - return Ok(None); - } - Err(net_error::NotFoundError) => { - // is this remote node simply ahead of us? 
- if let Some(convo) = self.peers.get(&event_id) { - if self.chain_view.burn_block_height < convo.burnchain_tip_height { - debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Err(net_error::NotFoundError); - } - } - // not ahead of us -- it's a bad consensus hash - debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Ok(None); - } - Err(net_error::InvalidMessage) => { - // punish this peer - info!( - "Peer {:?} sent an invalid update for {}", - &outbound_neighbor_key, - if microblocks { - "streamed microblocks" - } else { - "blocks" - } - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - Err(e) => { - warn!( - "Failed to update inv state for {:?}: {:?}", - &outbound_neighbor_key, &e - ); - return Ok(None); - } - } - } - None => { - return Ok(None); - } - }; - Ok(Some(block_sortition_height)) - } - - /// Buffer a message for re-processing once the burnchain view updates - fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> () { - if let Some(msgs) = self.pending_messages.get_mut(&event_id) { - // check limits: - // at most 1 BlocksAvailable - // at most 1 MicroblocksAvailable - // at most 1 BlocksData - // at most $self.connection_opts.max_buffered_microblocks MicroblocksDatas - let mut blocks_available = 0; - let mut microblocks_available = 0; - let mut blocks_data = 0; - let mut microblocks_data = 0; - for msg in msgs.iter() { - match &msg.payload { - StacksMessageType::BlocksAvailable(_) => { - blocks_available += 1; - } - StacksMessageType::MicroblocksAvailable(_) => { - microblocks_available += 1; - } - StacksMessageType::Blocks(_) => { - blocks_data += 1; - } - StacksMessageType::Microblocks(_) => { - microblocks_data += 1; - } - _ => {} - } - } - - if let StacksMessageType::BlocksAvailable(_) = &msg.payload { - if blocks_available >= self.connection_opts.max_buffered_blocks_available { - debug!( - "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_available - ); - return; - } - } - if let StacksMessageType::MicroblocksAvailable(_) = &msg.payload { - if microblocks_available >= self.connection_opts.max_buffered_microblocks_available - { - debug!( - "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_available - ); - return; - } - } - if let StacksMessageType::Blocks(_) = &msg.payload { - if blocks_data >= self.connection_opts.max_buffered_blocks { - debug!( - "{:?}: Drop BlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_data - ); - return; - } - } - if let StacksMessageType::Microblocks(_) = &msg.payload { - if microblocks_data >= self.connection_opts.max_buffered_microblocks { - debug!( - "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_data - ); - return; - } - } - msgs.push(msg); - debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, - event_id, - msgs.len() - ); - } else { - self.pending_messages.insert(event_id, vec![msg]); - debug!( - "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id - ); - } - } - - /// Do we need a block or microblock stream, given its 
sortition's consensus hash? - fn need_block_or_microblock_stream( - sortdb: &SortitionDB, - chainstate: &StacksChainState, - consensus_hash: &ConsensusHash, - is_microblock: bool, - ) -> Result { - let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? - .ok_or(chainstate_error::NoSuchBlockError)?; - let block_hash_opt = if sn.sortition { - Some(sn.winning_stacks_block_hash) - } else { - None - }; - - let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; - if is_microblock { - // checking for microblock absence - Ok(inv.microblocks_bitvec[0] == 0) - } else { - // checking for block absence - Ok(inv.block_bitvec[0] == 0) - } - } - - /// Handle unsolicited BlocksAvailable. - /// Update our inv for this peer. - /// Mask errors. - /// Return whether or not we need to buffer this message - fn handle_unsolicited_BlocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_blocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process BlocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_blocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_blocks.available.iter() { - let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - false, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_block = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - false, - ) { - Ok(x) => x, - Err(e) => { - warn!( - "Failed to determine if we need block for consensus hash {}: {:?}", - &consensus_hash, &e - ); - false - } - }; - - debug!( - "Need block {}/{}? {}", - &consensus_hash, &block_hash, need_block - ); - - if need_block { - // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksAvailable. - /// Update our inv for this peer. - /// Mask errors. 
- /// Return whether or not we need to buffer this message - fn handle_unsolicited_MicroblocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_mblocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process MicroblocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_mblocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_mblocks.available.iter() { - let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - true, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - true, - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); - false - } - }; - - debug!( - "Need microblock stream {}/{}? {}", - &consensus_hash, &block_hash, need_microblock_stream - ); - - if need_microblock_stream { - // have the downloader request this microblock stream if it's new to us - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_microblock_sortition_height_available( - mblock_sortition_height, - ibd, - need_microblock_stream, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - to_buffer - } - - /// Handle unsolicited BlocksData. - /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have - /// an outbound connection to that peer. Accept the blocks data either way if it corresponds - /// to a winning sortition -- this will cause the blocks data to be fed into the relayer, which - /// will then decide whether or not it needs to be stored and/or forwarded. - /// Mask errors. 
- fn handle_unsolicited_BlocksData( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - new_blocks: &BlocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated BlocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process BlocksData from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - new_blocks.blocks.len() - ); - - let mut to_buffer = false; - - for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { - let sn = match SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &consensus_hash, - ) { - Ok(Some(sn)) => sn, - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - to_buffer = true; - } else { - debug!( - "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, consensus_hash, &e - ); - continue; - } - }; - - if !sn.pox_valid { - info!( - "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", - &self.local_peer, consensus_hash - ); - continue; - } - - if sn.winning_stacks_block_hash != block.block_hash() { - info!( - "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", - &self.local_peer, - block.block_hash(), - sn.winning_stacks_block_hash, - sn.sortition - ); - continue; - } - - // only bother updating the inventory for this event's peer if we have an outbound - // connection to it. - if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { - let _ = self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - &sn.consensus_hash, - false, - ); - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksData. - /// Returns whether or not to buffer (if buffer is true) - /// Returns whether or not to pass to the relayer (if buffer is false). 
- fn handle_unsolicited_MicroblocksData( - &mut self, - chainstate: &StacksChainState, - event_id: usize, - new_microblocks: &MicroblocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated MicroblocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process MicroblocksData from {:?} for {} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - &new_microblocks.index_anchor_block, - new_microblocks.microblocks.len() - ); - - // do we have the associated anchored block? - match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { - Ok(Some(_)) => { - // yup; can process now - debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); - !buffer - } - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - true - } else { - debug!( - "{:?}: Will not buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - false - } - } - Err(e) => { - warn!( - "{:?}: Failed to get header hashes for {:?}: {:?}", - &self.local_peer, &new_microblocks.index_anchor_block, &e - ); - false - } - } - } - - /// Returns (true, x) if we should buffer the message and try again - /// Returns (x, true) if the relayer should receive the message - fn handle_unsolicited_message( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - preamble: &Preamble, - payload: &StacksMessageType, - ibd: bool, - buffer: bool, - ) -> (bool, bool) { - match payload { - // Update our inv state for this peer, but only do so if we have an - // outbound connection to it and it's authenticated (we don't synchronize inv - // state with inbound peers). Since we will have received this message - // from an _inbound_ conversation, we need to find the reciprocal _outbound_ - // conversation and use _that_ conversation's neighbor key to identify - // which inventory we need to update. 
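Every handler deleted across this stretch (`handle_unsolicited_BlocksAvailable`, `..._MicroblocksAvailable`, `..._BlocksData`, `..._MicroblocksData`, and the dispatcher below) reduces each unsolicited message to one of three dispositions, and that contract is what any relocated implementation has to preserve; the handlers' new home is not shown in this diff. A simplified sketch of the shape of that decision (the real handlers also update peer inventories along the way):

    enum Disposition {
        Buffer, // consensus hash not yet known: retry after the next chain-view update
        Relay,  // hand the payload to the relayer for storage and forwarding
        Drop,   // invalid, unauthenticated, or over the buffering limits
    }

    fn dispose(consensus_hash_known: bool, may_buffer: bool, within_limits: bool) -> Disposition {
        if consensus_hash_known {
            Disposition::Relay
        } else if may_buffer && within_limits {
            Disposition::Buffer
        } else {
            Disposition::Drop
        }
    }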
- StacksMessageType::BlocksAvailable(ref new_blocks) => { - let to_buffer = self.handle_unsolicited_BlocksAvailable( - sortdb, chainstate, event_id, new_blocks, ibd, buffer, - ); - (to_buffer, false) - } - StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksAvailable( - sortdb, - chainstate, - event_id, - new_mblocks, - ibd, - buffer, - ); - (to_buffer, false) - } - StacksMessageType::Blocks(ref new_blocks) => { - // update inv state for this peer - let to_buffer = - self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); - - // forward to relayer for processing - (to_buffer, true) - } - StacksMessageType::Microblocks(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksData( - chainstate, - event_id, - new_mblocks, - buffer, - ); - - // only forward to the relayer if we don't need to buffer it. - (to_buffer, true) - } - StacksMessageType::StackerDBPushChunk(ref data) => { - match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { - Ok(x) => { - // don't buffer, but do reject if invalid - (false, x) - } - Err(e) => { - info!( - "{:?}: failed to handle unsolicited {:?}: {:?}", - &self.local_peer, payload, &e - ); - (false, false) - } - } - } - _ => (false, true), - } - } - - /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. - /// Return messages that we couldn't handle here, but key them by neighbor, not event. - /// Drop invalid messages. - /// If buffer is true, then re-try handling this message once the burnchain view advances. - fn handle_unsolicited_messages( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - unsolicited: HashMap>, - ibd: bool, - buffer: bool, - ) -> HashMap> { - let mut unhandled: HashMap> = HashMap::new(); - for (event_id, messages) in unsolicited.into_iter() { - if messages.len() == 0 { - // no messages for this event - continue; - } - - let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { - convo.to_neighbor_key() - } else { - debug!( - "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", - &self.local_peer, - event_id, - messages.len() - ); - continue; - }; - - debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); - - for message in messages.into_iter() { - if !buffer { - debug!( - "{:?}: Re-try handling buffered message {} from {:?}", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - } - let (to_buffer, relay) = self.handle_unsolicited_message( - sortdb, - chainstate, - event_id, - &message.preamble, - &message.payload, - ibd, - buffer, - ); - if buffer && to_buffer { - self.buffer_data_message(event_id, message); - } else if relay { - // forward to relayer for processing - debug!( - "{:?}: Will forward message {} from {:?} to relayer", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - if let Some(msgs) = unhandled.get_mut(&neighbor_key) { - msgs.push(message); - } else { - unhandled.insert(neighbor_key.clone(), vec![message]); - } - } - } - } - unhandled - } - /// Find unauthenticated inbound conversations fn find_unauthenticated_inbound_convos(&self) -> Vec { let mut ret = vec![]; @@ -5301,125 +4161,197 @@ impl PeerNetwork { chainstate, sortdb, stacker_db_configs, + self.connection_opts.num_neighbors, )?; Ok(()) } /// Load up the parent stacks tip. 
/// For epoch 2.x, this is the pointer to the parent block of the current stacks tip - /// For epoch 3.x, this is the pointer to the tenure-start block of the parent tenure of the + /// For epoch 3.x, this is the pointer to the _tenure-start_ block of the parent tenure of the /// current stacks tip. /// If this is the first tenure in epoch 3.x, then this is the pointer to the epoch 2.x block /// that it builds atop. pub(crate) fn get_parent_stacks_tip( - cur_epoch: StacksEpochId, + &self, chainstate: &StacksChainState, stacks_tip_block_id: &StacksBlockId, - ) -> Result<(ConsensusHash, BlockHeaderHash, u64), net_error> { + ) -> Result<StacksTipInfo, net_error> { let header = NakamotoChainState::get_block_header(chainstate.db(), stacks_tip_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))?; + .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No such stacks block: {:?}", + self.get_local_peer(), + stacks_tip_block_id + ); + net_error::DBError(db_error::NotFoundError) + })?; - let parent_header = if cur_epoch < StacksEpochId::Epoch30 { - // prior to epoch 3.0, the self.prev_stacks_tip field is just the parent block - let parent_block_id = - StacksChainState::get_parent_block_id(chainstate.db(), &header.index_block_hash())? - .ok_or(net_error::DBError(db_error::NotFoundError))?; + let tenure_start_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + stacks_tip_block_id, + &header.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No tenure-start block for {} off of {}", + self.get_local_peer(), - NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } else { - // in epoch 3.0 and later, self.prev_stacks_tip is the first tenure block of the - // current tip's parent tenure. - match NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( - chainstate.db(), + &header.consensus_hash, - )? { - Some(ch) => NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &ch, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?, - None => { - // parent in epoch 2 - let tenure_start_block_header = - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &header.consensus_hash, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - let nakamoto_header = tenure_start_block_header - .anchored_header - .as_stacks_nakamoto() - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - NakamotoChainState::get_block_header( - chainstate.db(), - &nakamoto_header.parent_block_id, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } + stacks_tip_block_id + ); + net_error::DBError(db_error::NotFoundError) + })?; + + let parent_block_id = match tenure_start_header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(ref nakamoto_header) => { + nakamoto_header.parent_block_id.clone() } + StacksBlockHeaderTypes::Epoch2(..) => StacksChainState::get_parent_block_id( + chainstate.db(), + &tenure_start_header.index_block_hash(), + )?
+ .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No parent block ID found for epoch2x block {}", + self.get_local_peer(), + &tenure_start_header.index_block_hash() + ); + net_error::DBError(db_error::NotFoundError) + })?, + }; + + let parent_header = + NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id)?.ok_or_else( + || { + debug!( + "{:?}: get_parent_stacks_tip: No such parent stacks block: {:?}", + self.get_local_peer(), + &parent_block_id + ); + net_error::DBError(db_error::NotFoundError) + }, + )?; + + let parent_tenure_start_header = NakamotoChainState::get_tenure_start_block_header(&mut chainstate.index_conn(), stacks_tip_block_id, &parent_header.consensus_hash)? + .ok_or_else(|| { + debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parent {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); + net_error::DBError(db_error::NotFoundError) + })?; + + let parent_stacks_tip = StacksTipInfo { + consensus_hash: parent_tenure_start_header.consensus_hash, + block_hash: parent_tenure_start_header.anchored_header.block_hash(), + height: parent_tenure_start_header.anchored_header.height(), + is_nakamoto: parent_tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .is_some(), }; - Ok(( - parent_header.consensus_hash, - parent_header.anchored_header.block_hash(), - parent_header.anchored_header.height(), - )) - } - - /// Refresh our view of the aggregate public keys - /// Returns a list of (reward-cycle, option(pubkey)) pairs. - /// An option(pubkey) is defined for all reward cycles, but for epochs 2.4 and earlier, it will - /// be None. - fn find_new_aggregate_public_keys( + test_debug!( + "{:?}: Parent Stacks tip off of {} is {:?}", + self.get_local_peer(), + &stacks_tip_block_id, + &parent_stacks_tip + ); + Ok(parent_stacks_tip) + } + + /// Clear out old reward cycles + fn free_old_reward_cycles(&mut self, rc: u64) { + if self.current_reward_sets.len() > 3 { + self.current_reward_sets.retain(|old_rc, _| { + if (*old_rc).saturating_add(2) < rc { + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + true + }); + } + } + + /// Refresh our view of the last three reward cycles + /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the + /// signing set) for the current, previous, and previous-previous reward cycles. This data is + /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from + /// any of these reward cycles.
+ #[cfg_attr(test, mutants::skip)] + fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, - tip_sn: &BlockSnapshot, chainstate: &mut StacksChainState, - stacks_tip_block_id: &StacksBlockId, - ) -> Result<Vec<(u64, Option<Point>)>, net_error> { - let sort_tip_rc = self + tip_sn: &BlockSnapshot, + tip_block_id: &StacksBlockId, + ) -> Result<(), net_error> { + let cur_rc = self .burnchain .block_height_to_reward_cycle(tip_sn.block_height) .expect("FATAL: sortition from before system start"); - let next_agg_pubkey_rc = self - .aggregate_public_keys - .last_key_value() - .map(|(rc, _)| rc.saturating_add(1)) - .unwrap_or(0); - let mut new_agg_pubkeys: Vec<_> = (next_agg_pubkey_rc..=sort_tip_rc) - .filter_map(|key_rc| { - let ih = sortdb.index_handle(&tip_sn.sortition_id); - let agg_pubkey_opt = if self.get_current_epoch().epoch_id < StacksEpochId::Epoch25 { - None - } else { - test_debug!( - "Try to get aggregate public key for reward cycle {}", - key_rc - ); - NakamotoChainState::load_aggregate_public_key( - sortdb, - &ih, - chainstate, - self.burnchain.reward_cycle_to_block_height(key_rc), - &stacks_tip_block_id, - false, - ) - .ok() - }; - if agg_pubkey_opt.is_none() { - return None; + + let prev_rc = cur_rc.saturating_sub(1); + let prev_prev_rc = prev_rc.saturating_sub(1); + let ih = sortdb.index_handle(&tip_sn.sortition_id); + + for rc in [cur_rc, prev_rc, prev_prev_rc] { + let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); + let Some(ancestor_sort_id) = + get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? + else { + // reward cycle is too far back for there to be an ancestor + continue; + }; + let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); + let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; + + if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { + if let Some(anchor_hash) = anchor_hash_opt.as_ref() { + // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start + // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to + // deal with in the pre-Nakamoto days + if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) + || cached_rc_info.anchor_block_hash == *anchor_hash + { + // cached reward set data is still valid + continue; + } } - Some((key_rc, agg_pubkey_opt)) + } + + let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( + rc, + &tip_sn.sortition_id, + &self.burnchain, + chainstate, + tip_block_id, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + warn!( + "Failed to load reward cycle info for cycle {}: {:?}", + rc, &e + ); + e }) - .collect(); + .unwrap_or(None) else { + continue; + }; + + let rc_info = CurrentRewardSet { + reward_cycle: rc, + reward_cycle_info: reward_set_info, + anchor_block_consensus_hash: anchor_block_header.consensus_hash, + anchor_block_hash: anchor_block_header.anchored_header.block_hash(), + }; - if new_agg_pubkeys.len() == 0 && self.aggregate_public_keys.len() == 0 { - // special case -- we're before epoch 3.0, so don't waste time doing this again - new_agg_pubkeys.push((sort_tip_rc, None)); + test_debug!( + "Store cached reward set for reward cycle {} anchor block {}", + rc, + &rc_info.anchor_block_hash + ); + self.current_reward_sets.insert(rc, rc_info); } - Ok(new_agg_pubkeys) + self.free_old_reward_cycles(cur_rc); + Ok(()) } /// Refresh view of burnchain, if needed.
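// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of the patch.]
// The free_old_reward_cycles() change above in effect keeps only the current,
// previous, and previous-previous reward cycles cached once the map grows past
// three entries. A minimal standalone model of that eviction rule (the
// BTreeMap-of-&str harness here is hypothetical; the real cache maps reward
// cycles to CurrentRewardSet values):
fn main() {
    use std::collections::BTreeMap;
    let mut cache: BTreeMap<u64, &str> = (0..=10).map(|rc| (rc, "reward set")).collect();
    let cur_rc = 10;
    if cache.len() > 3 {
        // keep old_rc only if old_rc + 2 >= cur_rc, i.e. cycles 8, 9, and 10
        cache.retain(|old_rc, _| old_rc.saturating_add(2) >= cur_rc);
    }
    assert_eq!(cache.keys().copied().collect::<Vec<_>>(), vec![8, 9, 10]);
}
// ---------------------------------------------------------------------------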
@@ -5438,59 +4370,78 @@ impl PeerNetwork { ) -> Result<HashMap<NeighborKey, Vec<StacksMessage>>, net_error> { // update burnchain snapshot if we need to (careful -- it's expensive) let canonical_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - let stacks_tip = + let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height - || self.num_state_machine_passes == 0; - let stacks_tip_changed = self.stacks_tip != stacks_tip; - let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip.0, &stacks_tip.1); + || self.num_state_machine_passes == 0 + || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; + + let stacks_tip_changed = self.stacks_tip.consensus_hash != stacks_tip_ch + || self.stacks_tip.block_hash != stacks_tip_bhh + || self.stacks_tip.height != stacks_tip_height; + + let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); + let stacks_tip_is_nakamoto = if stacks_tip_changed { + // go check + chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&new_stacks_tip_block_id) + .unwrap_or(false) + } else { + self.stacks_tip.is_nakamoto + }; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed || stacks_tip_changed; + + if burnchain_tip_changed || stacks_tip_changed { + self.refresh_reward_cycles( + sortdb, + chainstate, + &canonical_sn, + &new_stacks_tip_block_id, + )?; + } + let mut ret: HashMap<NeighborKey, Vec<StacksMessage>> = HashMap::new(); - let aggregate_public_keys = self.find_new_aggregate_public_keys( - sortdb, - &canonical_sn, - chainstate, - &new_stacks_tip_block_id, - )?; - let (parent_stacks_tip, tenure_start_block_id, stacks_tip_sn) = if stacks_tip_changed { - let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_tip.0)?; + let (parent_stacks_tip, tenure_start_block_id) = if stacks_tip_changed { let tenure_start_block_id = if let Some(header) = NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &stacks_tip.0, + &mut chainstate.index_conn(), + &new_stacks_tip_block_id, + &stacks_tip_ch, )?
{ header.index_block_hash() } else { new_stacks_tip_block_id.clone() }; - let parent_tip_id = match Self::get_parent_stacks_tip( - self.get_current_epoch().epoch_id, - chainstate, - &new_stacks_tip_block_id, - ) { - Ok(tip_id) => tip_id, + let parent_tip = match self.get_parent_stacks_tip(chainstate, &new_stacks_tip_block_id) + { + Ok(tip) => tip, Err(net_error::DBError(db_error::NotFoundError)) => { // this is the first block - ( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - 0, - ) + debug!( + "First-ever block (no parent): {:?} ({}/{})", + &new_stacks_tip_block_id, &stacks_tip_ch, &stacks_tip_bhh + ); + StacksTipInfo { + consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + block_hash: FIRST_STACKS_BLOCK_HASH.clone(), + height: 0, + is_nakamoto: false, + } } Err(e) => return Err(e), }; - (parent_tip_id, tenure_start_block_id, stacks_tip_sn) + (parent_tip, tenure_start_block_id) } else { ( self.parent_stacks_tip.clone(), self.tenure_start_block_id.clone(), - self.stacks_tip_sn.clone(), ) }; @@ -5523,21 +4474,24 @@ impl PeerNetwork { } if burnchain_tip_changed { - // wake up the inv-sync and downloader -- we have potentially more sortitions - self.hint_sync_invs(self.chain_view.burn_stable_block_height); + if !ibd { + // wake up the inv-sync and downloader -- we have potentially more sortitions + self.hint_sync_invs(self.chain_view.burn_stable_block_height); + + // set up the antientropy protocol to try pushing the latest block + // (helps if you're a miner who gets temporarily disconnected) + self.antientropy_last_push_ts = get_epoch_time_secs(); + self.antientropy_start_reward_cycle = + self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; + } + self.hint_download_rescan( self.chain_view .burn_stable_block_height .saturating_sub(self.burnchain.first_block_height), - false, + ibd, ); - // set up the antientropy protocol to try pushing the latest block - // (helps if you're a miner who gets temporarily disconnected) - self.antientropy_last_push_ts = get_epoch_time_secs(); - self.antientropy_start_reward_cycle = - self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; - // update tx validation information self.ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), canonical_sn.block_height)?; @@ -5624,13 +4578,27 @@ impl PeerNetwork { // update cached stacks chain view for /v2/info and /v3/tenures/info self.burnchain_tip = canonical_sn; - self.stacks_tip = stacks_tip; - self.stacks_tip_sn = stacks_tip_sn; - self.parent_stacks_tip = parent_stacks_tip; - for (key_rc, agg_pubkey_opt) in aggregate_public_keys { - self.aggregate_public_keys.insert(key_rc, agg_pubkey_opt); - } self.tenure_start_block_id = tenure_start_block_id; + if stacks_tip_changed { + self.stacks_tip = StacksTipInfo { + consensus_hash: stacks_tip_ch, + block_hash: stacks_tip_bhh, + height: stacks_tip_height, + is_nakamoto: stacks_tip_is_nakamoto, + }; + self.parent_stacks_tip = parent_stacks_tip; + + test_debug!( + "{:?}: canonical Stacks tip is now {:?}", + self.get_local_peer(), + &self.stacks_tip + ); + test_debug!( + "{:?}: parent canonical Stacks tip is now {:?}", + self.get_local_peer(), + &self.parent_stacks_tip + ); + } Ok(ret) } @@ -5721,7 +4689,7 @@ impl PeerNetwork { // In parallel, do a mempool sync. // Remember any txs we get, so we can feed them to the relayer thread. 
- if let Some(mut txs) = self.do_network_mempool_sync(&mut dns_client_opt, mempool, ibd) { + if let Some(mut txs) = self.run_mempool_sync(&mut dns_client_opt, mempool, ibd) { network_result.synced_transactions.append(&mut txs); } @@ -5826,16 +4794,17 @@ impl PeerNetwork { debug!("Already have tx {}", txid); return false; } - let stacks_epoch = match sortdb - .index_conn() - .get_stacks_epoch(burnchain_tip.block_height as u32) + let stacks_epoch = match SortitionDB::get_stacks_epoch( + sortdb.conn(), + burnchain_tip.block_height, + ) + .ok() + .flatten() { Some(epoch) => epoch, None => { - warn!( - "Failed to store transaction because could not load Stacks epoch for canonical burn height = {}", - burnchain_tip.block_height - ); + warn!("Failed to store transaction because could not load Stacks epoch for canonical burn height = {}", + burnchain_tip.block_height); return false; } }; @@ -5850,7 +4819,7 @@ impl PeerNetwork { &stacks_epoch.block_limit, &stacks_epoch.epoch_id, ) { - warn!("Transaction rejected from mempool, {}", &e.into_json(&txid)); + info!("Transaction rejected from mempool, {}", &e.into_json(&txid)); return false; } @@ -6044,9 +5013,11 @@ impl PeerNetwork { }; let mut network_result = NetworkResult::new( + self.stacks_tip.block_id(), self.num_state_machine_passes, self.num_inv_sync_passes, self.num_downloader_passes, + self.peers.len(), self.chain_view.burn_block_height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), @@ -6073,7 +5044,7 @@ impl PeerNetwork { PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { let http_stacks_msgs = PeerNetwork::with_http(network, |ref mut net, ref mut http| { let mut node_state = - StacksNodeState::new(net, sortdb, chainstate, mempool, handler_args); + StacksNodeState::new(net, sortdb, chainstate, mempool, handler_args, ibd); http.run(network_state, &mut node_state, http_poll_state) }); network_result.consume_http_uploads(http_stacks_msgs); @@ -6126,8 +5097,8 @@ mod test { use crate::net::atlas::*; use crate::net::codec::*; use crate::net::db::*; - use crate::net::relay::test::make_contract_tx; use crate::net::test::*; + use crate::net::tests::relay::epoch2x::make_contract_tx; use crate::net::*; use crate::util_lib::test::*; @@ -6478,827 +5449,29 @@ mod test { } #[test] - fn test_mempool_sync_2_peers() { - // peer 1 gets some transactions; verify peer 2 gets the recent ones and not the old - // ones - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2210, 2211); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2212, 2213); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 10; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i 
in 0..(num_blocks / 2) { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // old transactions - let num_txs = 10; - let mut old_txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - old_txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - (num_blocks / 2) as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // keep mining to make these txs old - for i in (num_blocks / 2)..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(1); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = 
tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - // peer 2 has all the recent txs - // peer 2 has none of the old ones - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(old_txs.get(&tx.tx.txid()).is_none()); - } - } - - #[test] - fn test_mempool_sync_2_peers_paginated() { - // peer 1 gets some transactions; verify peer 2 gets them all - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2214, 2215); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2216, 2217); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - 
sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = 
peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - } - } - - #[test] - fn test_mempool_sync_2_peers_blacklisted() { - // peer 1 gets some transactions; peer 2 blacklists some of them; - // verify peer 2 gets only the non-blacklisted ones. - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2218, 2219); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2220, 2221); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - let mut peer_2_blacklist = vec![]; - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added 
{} {}", i, &txid); - - if i % 2 == 0 { - // peer 2 blacklists even-numbered txs - peer_2_blacklist.push(txid); - } - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // peer 2 blacklists them all - let mut peer_2_mempool = peer_2.mempool.take().unwrap(); - - // blacklisted txs never time out - peer_2_mempool.blacklist_timeout = u64::MAX / 2; - - let mempool_tx = peer_2_mempool.tx_begin().unwrap(); - MemPoolDB::inner_blacklist_txs(&mempool_tx, &peer_2_blacklist, get_epoch_time_secs()) - .unwrap(); - mempool_tx.commit().unwrap(); - - peer_2.mempool = Some(peer_2_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(!peer_2_blacklist.contains(&tx.tx.txid())); - } - } - - /// Make sure mempool sync never stores problematic transactions - #[test] - fn test_mempool_sync_2_peers_problematic() { - // peer 1 gets some transactions; peer 2 blacklists them all due to being invalid. - // verify peer 2 stores nothing. 
- let mut peer_1_config = TestPeerConfig::new(function_name!(), 2218, 2219); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2220, 2221); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 128; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - + fn test_is_connecting() { + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - - let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); - - let tx = make_contract_tx( - &pk, - 0, - (tx_exceeds_body.len() * 100) as u64, - "test-exceeds", - &tx_exceeds_body, - ); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); + let nk = peer_1.to_neighbor().addr; - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // blacklisted txs never time out - let mut peer_2_mempool = peer_2.mempool.take().unwrap(); - peer_2_mempool.blacklist_timeout = u64::MAX / 2; - peer_2.mempool = Some(peer_2_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let 
mut round = 0; - let mut peer_1_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2.network.mempool_sync_txs < (num_txs as u64) { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } + assert!(!peer_1.network.is_connecting(1)); + assert!(!peer_1.network.is_connecting_neighbor(&nk)); - round += 1; + let comms = PeerNetworkComms::new(); + assert!(!comms.is_neighbor_connecting(&peer_1.network, &nk)); - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2.network.mempool_sync_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); + let sock = mio::net::TcpStream::connect(&SocketAddr::from(( + [127, 0, 0, 1], + peer_1.config.server_port, + ))) + .unwrap(); + peer_1.network.connecting.insert( + 1, + ConnectingPeer::new(sock, true, get_epoch_time_secs(), nk.clone()), + ); - assert_eq!(peer_2_mempool_txs.len(), 128); + assert!(peer_1.network.is_connecting(1)); + assert!(peer_1.network.is_connecting_neighbor(&nk)); + assert!(comms.is_neighbor_connecting(&peer_1.network, &nk)); } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 28ff92ae585..32dc7d065ab 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -25,18 +25,24 @@ use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::ClarityVersion; use rand::prelude::*; use rand::{thread_rng, Rng}; +use stacks_common::address::public_keys_to_address_hash; use stacks_common::codec::MAX_PAYLOAD_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId}; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleConn}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, +}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::comm::CoordinatorChannels; -use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::coordinator::{ + BlockEventDispatcher, Error as CoordinatorError, OnChainRewardSetProvider, +}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use 
crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; @@ -67,6 +73,8 @@ pub const RELAY_DUPLICATE_INFERENCE_WARMUP: usize = 128; pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, + /// connection options + connection_opts: ConnectionOptions, /// StackerDB connection stacker_dbs: StackerDBs, } @@ -77,12 +85,12 @@ pub struct RelayerStats { /// Note that we key on (addr, port), not the full NeighborAddress. /// (TODO: Nothing is done with this yet, but one day we'll use it to probe for network /// choke-points). - relay_stats: HashMap<NeighborAddress, RelayStats>, - relay_updates: BTreeMap<u64, NeighborAddress>, + pub(crate) relay_stats: HashMap<NeighborAddress, RelayStats>, + pub(crate) relay_updates: BTreeMap<u64, NeighborAddress>, /// Messages sent from each neighbor recently (includes duplicates) - recent_messages: HashMap<NeighborAddress, VecDeque<(u64, Sha512Trunc256Sum)>>, - recent_updates: BTreeMap<u64, NeighborAddress>, + pub(crate) recent_messages: HashMap<NeighborAddress, VecDeque<(u64, Sha512Trunc256Sum)>>, + pub(crate) recent_updates: BTreeMap<u64, NeighborAddress>, next_priority: u64, } @@ -93,6 +101,7 @@ pub struct ProcessedNetReceipts { pub num_new_blocks: u64, pub num_new_confirmed_microblocks: u64, pub num_new_unconfirmed_microblocks: u64, + pub num_new_nakamoto_blocks: u64, } /// A trait for implementing both mempool event observer methods and stackerdb methods. @@ -170,6 +179,16 @@ impl RelayPayload for StacksMicroblock { } } +impl RelayPayload for NakamotoBlock { + fn get_digest(&self) -> Sha512Trunc256Sum { + let h = self.block_id(); + Sha512Trunc256Sum(h.0) + } + fn get_id(&self) -> String { + format!("NakamotoBlock({})", self.block_id()) + } +} + impl RelayPayload for StacksTransaction { fn get_digest(&self) -> Sha512Trunc256Sum { let h = self.txid(); @@ -317,7 +336,7 @@ impl RelayerStats { } /// Map neighbors to the frequency of their AS numbers in the given neighbors list - fn count_ASNs( + pub(crate) fn count_ASNs( conn: &DBConn, neighbors: &[NeighborKey], ) -> Result<HashMap<NeighborKey, u64>, net_error> { @@ -442,7 +461,7 @@ } for l in 0..count { - if norm <= 1 { + if norm == 0 { // just one option break; } @@ -461,8 +480,8 @@ sampled += 1; // sample without replacement - rankings_vec[i].1 -= 1; - norm -= 1; + norm = norm.saturating_sub(rankings_vec[i].1); + rankings_vec[i].1 = 0; break; } } @@ -474,20 +493,35 @@ } } +/// Processed result of pushed Nakamoto blocks +pub struct AcceptedNakamotoBlocks { + pub relayers: Vec<RelayData>, + pub blocks: Vec<NakamotoBlock>, +} + impl Relayer { - pub fn new(handle: NetworkHandle, stacker_dbs: StackerDBs) -> Relayer { + pub fn new( + handle: NetworkHandle, + connection_opts: ConnectionOptions, + stacker_dbs: StackerDBs, + ) -> Relayer { Relayer { p2p: handle, + connection_opts, stacker_dbs, } } pub fn from_p2p(network: &mut PeerNetwork, stacker_dbs: StackerDBs) -> Relayer { let handle = network.new_handle(1024); - Relayer::new(handle, stacker_dbs) + Relayer::new(handle, network.connection_opts.clone(), stacker_dbs) + } + + pub fn get_p2p_handle(&self) -> NetworkHandle { + self.p2p.clone() } - /// Given blocks pushed to us, verify that they correspond to expected block data. + /// Given Stacks 2.x blocks pushed to us, verify that they correspond to expected block data. pub fn validate_blocks_push( conn: &SortitionDBConn, blocks_data: &BlocksData, @@ -518,10 +552,118 @@ "No such sortition in block with consensus hash {}", consensus_hash ); + return Err(net_error::InvalidMessage); + } + } + Ok(()) + } + + /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data.
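// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of the patch.]
// validate_nakamoto_blocks_push() below memoizes reward sets per reward cycle
// (the loaded_reward_sets map) while it scans a batch of pushed blocks, so the
// expensive load_nakamoto_reward_set() call runs at most once per cycle. A
// standalone distillation of that caching pattern; expensive_load() is a
// hypothetical stand-in for the real loader:
use std::collections::HashMap;

fn expensive_load(cycle: u64) -> Vec<String> {
    println!("loading reward set for cycle {cycle}");
    vec![format!("signer-set-for-cycle-{cycle}")]
}

fn main() {
    let block_cycles = [7u64, 7, 7, 8, 8]; // reward cycles of a pushed batch
    let mut cache: HashMap<u64, Vec<String>> = HashMap::new();
    for cycle in block_cycles {
        // only a cache miss triggers the loader
        let reward_set = cache.entry(cycle).or_insert_with(|| expensive_load(cycle));
        assert!(!reward_set.is_empty());
    }
    assert_eq!(cache.len(), 2); // loaded once per distinct cycle
}
// ---------------------------------------------------------------------------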
+ pub fn validate_nakamoto_blocks_push( + burnchain: &Burnchain, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, + nakamoto_blocks_data: &NakamotoBlocksData, + ) -> Result<(), net_error> { + let conn = sortdb.index_conn(); + let mut loaded_reward_sets = HashMap::new(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + // is this the right Stacks block for this sortition? + let Some(sn) = SortitionDB::get_block_snapshot_consensus( + conn.conn(), + &nakamoto_block.header.consensus_hash, + )? + else { + // don't know this sortition yet + continue; + }; + + if !sn.pox_valid { + info!( + "Pushed block from consensus hash {} corresponds to invalid PoX state", + nakamoto_block.header.consensus_hash + ); + continue; + } + + if !sn.sortition { + info!( + "No such sortition in block with consensus hash {}", + &nakamoto_block.header.consensus_hash + ); + return Err(net_error::InvalidMessage); + } + + // is the block signed by the active reward set? + let sn_rc = burnchain + .pox_reward_cycle(sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { + rc_info + } else { + let Some((reward_set_info, _)) = load_nakamoto_reward_set( + sn_rc, + &tip_sn.sortition_id, + burnchain, + chainstate, + stacks_tip, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + error!( + "Failed to load reward cycle info for cycle {}: {:?}", + sn_rc, &e + ); + match e { + CoordinatorError::ChainstateError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::ChainstateError(format!("{:?}", &e)) + } + CoordinatorError::DBError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::DBError(e) + } + _ => { + error!( + "Failed to load RewardCycleInfo for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::NoPoXRewardSet(sn_rc) + } + } + })? + else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + loaded_reward_sets.insert(sn_rc, reward_set_info); + loaded_reward_sets.get(&sn_rc).expect("FATAL: infallible") + }; - // TODO: once PoX is implemented, this can be permitted if we're missing the reward - // window's anchor block for the reward window in which this block lives. Until - // then, it's never okay -- this peer shall be considered broken. + let Some(reward_set) = reward_cycle_info.known_selected_anchor_block() else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + warn!( + "Signature verification failure for Nakamoto block"; + "consensus_hash" => %nakamoto_block.header.consensus_hash, + "block_hash" => %nakamoto_block.header.block_hash(), + "reward_cycle" => sn_rc, + "error" => %e.to_string() + ); return Err(net_error::InvalidMessage); } } @@ -653,11 +795,14 @@ impl Relayer { /// downloaded by us, or pushed via p2p. 
/// Return Ok(true) if we stored it, Ok(false) if we didn't pub fn process_new_nakamoto_block( + burnchain: &Burnchain, sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, - block: NakamotoBlock, + stacks_tip: &StacksBlockId, + block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, + obtained_method: NakamotoBlockObtainMethod, ) -> Result<bool, chainstate_error> { debug!( "Handle incoming Nakamoto block {}/{}", @@ -668,7 +813,16 @@ // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&block.header.block_id())? + .has_nakamoto_block_with_index_hash(&block.header.block_id()) + .map_err(|e| { + warn!( + "Failed to determine if we have Nakamoto block {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &e + ); + e + })? { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); @@ -676,7 +830,13 @@ let block_sn = SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? - .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + .ok_or_else(|| { + debug!( + "Failed to load snapshot for consensus hash {}", + &block.header.consensus_hash + ); + chainstate_error::DBError(db_error::NotFoundError) + })?; // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition @@ -686,9 +846,9 @@ if epoch_id < StacksEpochId::Epoch30 { error!("Nakamoto blocks are not supported in this epoch"); - return Err(chainstate_error::InvalidStacksBlock( - "Nakamoto blocks are not supported in this epoch".into(), - )); + return Err(chainstate_error::InvalidStacksBlock(format!( + "Nakamoto blocks are not supported in this epoch: {epoch_id}" + ))); } // don't relay this block if it's using the wrong AST rules (this would render at least one of its @@ -721,17 +881,47 @@ ); let config = chainstate.config(); - let Ok(aggregate_public_key) = - NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) - else { - warn!("Failed to get aggregate public key.
Will not store or relay"; - "stacks_block_hash" => %block.header.block_hash(), - "consensus_hash" => %block.header.consensus_hash, - "burn_height" => block.header.chain_length, - "sortition_height" => block_sn.block_height, - ); - return Ok(false); + let tip = block_sn.sortition_id; + + let reward_info = match load_nakamoto_reward_set( + burnchain + .pox_reward_cycle(block_sn.block_height) + .expect("FATAL: block snapshot has no reward cycle"), + &tip, + burnchain, + chainstate, + stacks_tip, + sortdb, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, ..))) => reward_info, + Ok(None) => { + error!("No RewardCycleInfo found for tip {}", tip); + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(CoordinatorError::DBError(db_error::NotFoundError)) => { + error!("No RewardCycleInfo found for tip {}", tip); + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(CoordinatorError::ChainstateError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); + return Err(e); + } + Err(CoordinatorError::DBError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); + return Err(chainstate_error::DBError(e)); + } + Err(e) => { + error!("Failed to load RewardCycleInfo for tip {}: {:?}", tip, &e); + return Err(chainstate_error::PoxNoRewardCycle); + } + }; + let reward_cycle = reward_info.reward_cycle; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(chainstate_error::NoRegisteredSigners(reward_cycle)); }; + let (headers_conn, staging_db_tx) = chainstate.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &config, @@ -739,7 +929,8 @@ sort_handle, &staging_db_tx, headers_conn, - &aggregate_public_key, + reward_set, + obtained_method, )?; staging_db_tx.commit()?; @@ -757,29 +948,39 @@ Ok(accepted) } - /// Process nakamoto blocks. + #[cfg_attr(test, mutants::skip)] + /// Process nakamoto blocks that we downloaded. /// Log errors but do not return them. - pub fn process_nakamoto_blocks( + /// Returns the list of blocks we accepted. + pub fn process_downloaded_nakamoto_blocks( + burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, blocks: impl Iterator<Item = NakamotoBlock>, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(), chainstate_error> { + ) -> Result<Vec<NakamotoBlock>, chainstate_error> { + let mut accepted = vec![]; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let mut sort_handle = sortdb.index_handle(&tip.sortition_id); for block in blocks { let block_id = block.block_id(); if let Err(e) = Self::process_new_nakamoto_block( + burnchain, sortdb, &mut sort_handle, chainstate, - block, + stacks_tip, + &block, coord_comms, + NakamotoBlockObtainMethod::Downloaded, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); + } else { + accepted.push(block); } } - Ok(()) + Ok(accepted) } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by @@ -1343,6 +1544,102 @@ Ok((mblock_datas, bad_neighbors)) } + #[cfg_attr(test, mutants::skip)] + /// Preprocess all pushed Nakamoto blocks + /// Return the Nakamoto blocks we can accept (and who relayed them), as well as the + /// list of peers that served us invalid data.
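// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of the patch.]
// The pushed-block loop below only punishes a peer for provably invalid data
// (a failed signer-signature check or an InvalidStacksBlock error); transient
// failures such as an unknown sortition are skipped without banning. A
// standalone distillation of that policy, with a hypothetical error enum:
#[derive(Debug)]
enum PushError {
    InvalidStacksBlock(String), // provably bad -- the sender is at fault
    NotReady,                   // e.g. sortition not yet known -- retry later
}

fn should_ban_sender(result: &Result<bool, PushError>) -> bool {
    matches!(result, Err(PushError::InvalidStacksBlock(_)))
}

fn main() {
    assert!(should_ban_sender(&Err(PushError::InvalidStacksBlock(
        "bad signature".into()
    ))));
    assert!(!should_ban_sender(&Err(PushError::NotReady)));
    assert!(!should_ban_sender(&Ok(true))); // accepted: nothing to punish
}
// ---------------------------------------------------------------------------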
+ pub(crate) fn process_pushed_nakamoto_blocks( + network_result: &mut NetworkResult, + burnchain: &Burnchain, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + coord_comms: Option<&CoordinatorChannels>, + ) -> Result<(Vec, Vec), net_error> { + let mut pushed_blocks = vec![]; + let mut bad_neighbors = vec![]; + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + // process Nakamoto blocks pushed to us. + // If a neighbor sends us an invalid Nakamoto block, then ban them. + for (neighbor_key, relayers_and_block_data) in + network_result.pushed_nakamoto_blocks.iter_mut() + { + for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter_mut() { + let mut accepted_blocks = vec![]; + if let Err(e) = Relayer::validate_nakamoto_blocks_push( + burnchain, + sortdb, + chainstate, + &network_result.stacks_tip, + nakamoto_blocks_data, + ) { + info!( + "Failed to validate Nakamoto blocks pushed from {:?}: {:?}", + neighbor_key, &e + ); + + // punish this peer + bad_neighbors.push((*neighbor_key).clone()); + break; + } + + for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) { + let block_id = nakamoto_block.block_id(); + debug!( + "Received pushed Nakamoto block {} from {}", + block_id, neighbor_key + ); + let mut sort_handle = sortdb.index_handle(&tip.sortition_id); + match Self::process_new_nakamoto_block( + burnchain, + sortdb, + &mut sort_handle, + chainstate, + &network_result.stacks_tip, + &nakamoto_block, + coord_comms, + NakamotoBlockObtainMethod::Pushed, + ) { + Ok(accepted) => { + if accepted { + debug!( + "Accepted Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, neighbor_key + ); + accepted_blocks.push(nakamoto_block); + } else { + warn!( + "Rejected Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, + ); + } + } + Err(chainstate_error::InvalidStacksBlock(msg)) => { + warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); + bad_neighbors.push((*neighbor_key).clone()); + break; + } + Err(e) => { + warn!( + "Could not process pushed Nakamoto block {}: {:?}", + &block_id, &e + ); + } + } + } + + if accepted_blocks.len() > 0 { + pushed_blocks.push(AcceptedNakamotoBlocks { + relayers: relayers.clone(), + blocks: accepted_blocks, + }); + } + } + } + + Ok((pushed_blocks, bad_neighbors)) + } + /// Verify that a relayed transaction is not problematic. This is a static check -- we only /// look at the tx contents. /// @@ -1464,7 +1761,7 @@ impl Relayer { /// Verify that a relayed microblock is not problematic -- i.e. it doesn't contain any /// problematic transactions. This is a static check -- we only look at the microblock /// contents. - /// + /// /// Returns true if the check passed -- i.e. no problems. /// Returns false if not pub fn static_check_problematic_relayed_microblock( @@ -1634,6 +1931,91 @@ impl Relayer { )) } + #[cfg_attr(test, mutants::skip)] + /// Process new Nakamoto blocks, both pushed and downloaded. + /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that + /// sent us invalid blocks. + pub fn process_new_nakamoto_blocks( + network_result: &mut NetworkResult, + burnchain: &Burnchain, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + coord_comms: Option<&CoordinatorChannels>, + ) -> Result<(Vec, Vec), net_error> { + // process downloaded Nakamoto blocks. 
+ // We treat them as singleton blocks fetched via zero relayers + let nakamoto_blocks = + std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); + let mut accepted_nakamoto_blocks_and_relayers = + match Self::process_downloaded_nakamoto_blocks( + burnchain, + sortdb, + chainstate, + &network_result.stacks_tip, + nakamoto_blocks.into_values(), + coord_comms, + ) { + Ok(accepted) => vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: accepted, + }], + Err(e) => { + warn!("Failed to process downloaded Nakamoto blocks: {:?}", &e); + vec![] + } + }; + + // process pushed Nakamoto blocks + let (pushed_blocks_and_relayers, bad_neighbors) = match Self::process_pushed_nakamoto_blocks( + network_result, + burnchain, + sortdb, + chainstate, + coord_comms, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to process pushed Nakamoto blocks: {:?}", &e); + (vec![], vec![]) + } + }; + + let mut http_uploaded_blocks = vec![]; + for block in network_result.uploaded_nakamoto_blocks.drain(..) { + let block_id = block.block_id(); + let have_block = chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&block_id) + .unwrap_or_else(|e| { + warn!( + "Failed to determine if we have Nakamoto block"; + "stacks_block_id" => %block_id, + "err" => ?e + ); + false + }); + if have_block { + debug!( + "Received http-uploaded nakamoto block"; + "stacks_block_id" => %block_id, + ); + http_uploaded_blocks.push(block); + } + } + if !http_uploaded_blocks.is_empty() { + coord_comms.inspect(|comm| { + comm.announce_new_stacks_block(); + }); + } + + accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); + accepted_nakamoto_blocks_and_relayers.push(AcceptedNakamotoBlocks { + relayers: vec![], + blocks: http_uploaded_blocks, + }); + Ok((accepted_nakamoto_blocks_and_relayers, bad_neighbors)) + } + /// Produce blocks-available messages from blocks we just got. pub fn load_blocks_available_data( sortdb: &SortitionDB, @@ -1717,7 +2099,7 @@ impl Relayer { /// Store all new transactions we received, and return the list of transactions that we need to /// forward (as well as their relay hints). Also, garbage-collect the mempool. - fn process_transactions( + pub(crate) fn process_transactions( network_result: &mut NetworkResult, sortdb: &SortitionDB, chainstate: &mut StacksChainState, @@ -1767,18 +2149,12 @@ impl Relayer { ret.push((vec![], tx.clone())); } - // garbage-collect - if chain_height > MEMPOOL_MAX_TRANSACTION_AGE { - let min_height = chain_height.saturating_sub(MEMPOOL_MAX_TRANSACTION_AGE); - let mut mempool_tx = mempool.tx_begin()?; + mempool.garbage_collect( + chain_height, + &epoch_id.mempool_garbage_behavior(), + event_observer, + )?; - debug!( - "Remove all transactions beneath block height {}", - min_height - ); - MemPoolDB::garbage_collect(&mut mempool_tx, min_height, event_observer)?; - mempool_tx.commit()?; - } update_stacks_tip_height(chain_height as i64); Ok(ret) @@ -1839,8 +2215,10 @@ impl Relayer { "Reload unconfirmed state off of {}/{}", &canonical_consensus_hash, &canonical_block_hash ); - let processed_unconfirmed_state = - chainstate.reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip)?; + let processed_unconfirmed_state = chainstate.reload_unconfirmed_state( + &sortdb.index_handle_at_block(chainstate, &canonical_tip)?, + canonical_tip, + )?; Ok(processed_unconfirmed_state) } @@ -2005,31 +2383,88 @@ impl Relayer { ) } - /// Given a network result, consume and store all data. - /// * Add all blocks and microblocks to staging. 
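`process_new_nakamoto_blocks` merges three block sources into one list of `AcceptedNakamotoBlocks`. A minimal sketch of that merge, with placeholder types: downloaded and HTTP-uploaded blocks travel with empty relayer lists, while pushed blocks keep the relayers that delivered them.

```rust
// `&'static str` stands in for a NakamotoBlock; `String` for a relayer.
#[derive(Debug)]
struct AcceptedSet {
    relayers: Vec<String>,
    blocks: Vec<&'static str>,
}

fn coalesce(
    downloaded: Vec<&'static str>,
    pushed: Vec<AcceptedSet>,
    uploaded: Vec<&'static str>,
) -> Vec<AcceptedSet> {
    // Downloaded blocks are treated as fetched via zero relayers.
    let mut out = vec![AcceptedSet { relayers: vec![], blocks: downloaded }];
    // Pushed blocks retain their relayer hints for forwarding decisions.
    out.extend(pushed);
    // HTTP-uploaded blocks also carry no relayer hints.
    out.push(AcceptedSet { relayers: vec![], blocks: uploaded });
    out
}

fn main() {
    let merged = coalesce(
        vec!["blk-1"],
        vec![AcceptedSet { relayers: vec!["peer-a".into()], blocks: vec!["blk-2"] }],
        vec!["blk-3"],
    );
    assert_eq!(merged.len(), 3);
}
```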
- /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks - /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams - /// * Forward along unconfirmed microblocks that we didn't already have - /// * Add all transactions to the mempool. - /// * Forward transactions we didn't already have. - /// * Reload the unconfirmed state, if necessary. - /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and - /// turned into peer bans. - pub fn process_network_result( + /// Relay epoch2 block data + fn relay_epoch2_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + new_blocks: HashMap, + new_confirmed_microblocks: HashMap)>, + new_microblocks: Vec<(Vec, MicroblocksData)>, + ) { + // have the p2p thread tell our neighbors about newly-discovered blocks + let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); + let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) + .unwrap_or(BlocksAvailableMap::new()); + if available.len() > 0 { + debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); + if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { + warn!("Failed to advertize new blocks: {:?}", &e); + } + } + + // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams + let new_mblock_chs = new_confirmed_microblocks + .iter() + .map(|(ch, _)| ch.clone()) + .collect(); + let mblocks_available = Relayer::load_blocks_available_data(sortdb, new_mblock_chs) + .unwrap_or(BlocksAvailableMap::new()); + if mblocks_available.len() > 0 { + debug!( + "{:?}: Confirmed microblock streams available: {}", + &_local_peer, + mblocks_available.len() + ); + if let Err(e) = self + .p2p + .advertize_microblocks(mblocks_available, new_confirmed_microblocks) + { + warn!("Failed to advertize new confirmed microblocks: {:?}", &e); + } + } + + // have the p2p thread forward all new unconfirmed microblocks + if new_microblocks.len() > 0 { + debug!( + "{:?}: Unconfirmed microblocks: {}", + &_local_peer, + new_microblocks.len() + ); + for (relayers, mblocks_msg) in new_microblocks.into_iter() { + debug!( + "{:?}: Send {} microblocks for {}", + &_local_peer, + mblocks_msg.microblocks.len(), + &mblocks_msg.index_anchor_block + ); + let msg = StacksMessageType::Microblocks(mblocks_msg); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast microblock: {:?}", &e); + } + } + } + } + + #[cfg_attr(test, mutants::skip)] + /// Process epoch2 block data. 
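A hedged sketch of the relay split that `relay_epoch2_blocks` implements above, with placeholder types: anchored blocks and confirmed microblock streams are advertised so neighbors can fetch them on demand, while unconfirmed microblocks are pushed directly.

```rust
// Placeholder actions; the real code sends BlocksAvailable /
// MicroblocksAvailable announcements and Microblocks messages.
#[derive(Debug)]
enum RelayAction {
    AdvertiseBlocks(Vec<String>),
    AdvertiseMicroblockStreams(Vec<String>),
    BroadcastMicroblocks(String),
}

fn plan_epoch2_relay(
    new_blocks: Vec<String>,
    new_confirmed_streams: Vec<String>,
    unconfirmed: Vec<String>,
) -> Vec<RelayAction> {
    let mut plan = vec![];
    if !new_blocks.is_empty() {
        plan.push(RelayAction::AdvertiseBlocks(new_blocks));
    }
    if !new_confirmed_streams.is_empty() {
        plan.push(RelayAction::AdvertiseMicroblockStreams(new_confirmed_streams));
    }
    for mblocks in unconfirmed {
        plan.push(RelayAction::BroadcastMicroblocks(mblocks));
    }
    plan
}

fn main() {
    let plan = plan_epoch2_relay(vec!["ch-1".into()], vec![], vec!["mb-1".into()]);
    assert_eq!(plan.len(), 2);
}
```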
+ /// Relays blocks and microblocks as needed + /// Returns (num new blocks, num new confirmed microblocks, num new unconfirmed microblocks) + fn process_new_epoch2_blocks( &mut self, _local_peer: &LocalPeer, network_result: &mut NetworkResult, sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, ibd: bool, coord_comms: Option<&CoordinatorChannels>, - event_observer: Option<&dyn RelayEventDispatcher>, - ) -> Result { + ) -> (u64, u64, u64) { let mut num_new_blocks = 0; let mut num_new_confirmed_microblocks = 0; let mut num_new_unconfirmed_microblocks = 0; - match Relayer::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { + + // Process epoch2 data + match Self::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { Ok((new_blocks, new_confirmed_microblocks, new_microblocks, bad_block_neighbors)) => { // report quantities of new data in the receipts num_new_blocks = new_blocks.len() as u64; @@ -2051,118 +2486,308 @@ impl Relayer { // only relay if not ibd if !ibd { - // have the p2p thread tell our neighbors about newly-discovered blocks - let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); - let available = Relayer::load_blocks_available_data(sortdb, new_block_chs)?; - if available.len() > 0 { - debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); - if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { - warn!("Failed to advertize new blocks: {:?}", &e); - } - } - - // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams - let new_mblock_chs = new_confirmed_microblocks - .iter() - .map(|(ch, _)| ch.clone()) - .collect(); - let mblocks_available = - Relayer::load_blocks_available_data(sortdb, new_mblock_chs)?; - if mblocks_available.len() > 0 { - debug!( - "{:?}: Confirmed microblock streams available: {}", - &_local_peer, - mblocks_available.len() - ); - if let Err(e) = self - .p2p - .advertize_microblocks(mblocks_available, new_confirmed_microblocks) - { - warn!("Failed to advertize new confirmed microblocks: {:?}", &e); - } - } - - // have the p2p thread forward all new unconfirmed microblocks - if new_microblocks.len() > 0 { - debug!( - "{:?}: Unconfirmed microblocks: {}", - &_local_peer, - new_microblocks.len() - ); - for (relayers, mblocks_msg) in new_microblocks.into_iter() { - debug!( - "{:?}: Send {} microblocks for {}", - &_local_peer, - mblocks_msg.microblocks.len(), - &mblocks_msg.index_anchor_block - ); - let msg = StacksMessageType::Microblocks(mblocks_msg); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast microblock: {:?}", &e); - } - } - } + self.relay_epoch2_blocks( + _local_peer, + sortdb, + new_blocks, + new_confirmed_microblocks, + new_microblocks, + ); } } Err(e) => { warn!("Failed to process new blocks: {:?}", &e); } - }; - - let nakamoto_blocks = - std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); - if let Err(e) = Relayer::process_nakamoto_blocks( - sortdb, - chainstate, - nakamoto_blocks.into_values(), - coord_comms, - ) { - warn!("Failed to process Nakamoto blocks: {:?}", &e); } + ( + num_new_blocks, + num_new_confirmed_microblocks, + num_new_unconfirmed_microblocks, + ) + } - let mut mempool_txs_added = vec![]; + #[cfg_attr(test, mutants::skip)] + /// Get the last N sortitions, in order from the sortition tip to the n-1st ancestor + pub fn get_last_n_sortitions( + sortdb: &SortitionDB, + n: u64, + ) -> Result, chainstate_error> { + let mut ret 
= vec![]; + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + ret.push(sort_tip); + + for _i in 0..(n.saturating_sub(1)) { + let last_sn_parent_sortition_id = ret + .last() + .map(|sn| sn.parent_sortition_id.clone()) + .expect("Infallible -- ret is non-empty"); + let sn = SortitionDB::get_block_snapshot(sortdb.conn(), &last_sn_parent_sortition_id)? + .ok_or(db_error::NotFoundError)?; + ret.push(sn); + } + Ok(ret) + } - // only care about transaction forwarding if not IBD - if !ibd { - // store all transactions, and forward the novel ones to neighbors - test_debug!( - "{:?}: Process {} transaction(s)", - &_local_peer, - network_result.pushed_transactions.len() - ); - let new_txs = Relayer::process_transactions( - network_result, - sortdb, - chainstate, - mempool, - event_observer.map(|obs| obs.as_mempool_event_dispatcher()), - )?; + #[cfg_attr(test, mutants::skip)] + /// Relay Nakamoto blocks. + /// By default, only sends them if we don't have them yet. + /// This can be overridden by setting `force_send` to true. + pub fn relay_epoch3_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + accepted_blocks: Vec, + force_send: bool, + ) { + debug!( + "{:?}: relay {} sets of Nakamoto blocks", + _local_peer, + accepted_blocks.len() + ); - if new_txs.len() > 0 { - debug!( - "{:?}: Send {} transactions to neighbors", - &_local_peer, - new_txs.len() + // the relay strategy is to only send blocks that are within + // `connection_opts.max_nakamoto_block_relay_age`, which is the number of + // burnchain sortitions that have happened since its tenure began. The + // intuition is that nodes that are in IBD will be downloading blocks anyway, + // but nodes that are at or near the chain tip would benefit from having blocks + // pushed to them. + let Ok(relay_sortitions) = + Self::get_last_n_sortitions(sortdb, self.connection_opts.max_nakamoto_block_relay_age) + .map_err(|e| warn!("Failed to load last N sortitions: {:?}", &e)) + else { + return; + }; + + let relay_tenures: HashSet<_> = relay_sortitions + .into_iter() + .map(|sn| sn.consensus_hash) + .collect(); + + for blocks_and_relayers in accepted_blocks.into_iter() { + let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; + + let relay_blocks: Vec<_> = blocks + .into_iter() + .filter(|blk| { + // don't relay blocks for non-recent tenures + if !relay_tenures.contains(&blk.header.consensus_hash) { + test_debug!( + "Do not relay {} -- {} is not recent", + &blk.header.block_id(), + &blk.header.consensus_hash + ); + return false; + } + // don't relay blocks we already have. + // If we have a DB error in figuring this out, then don't relay by + // default (lest a faulty DB cause the node to spam the network). 
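The filter in `relay_epoch3_blocks` can be distilled as follows (a sketch, with `u64` standing in for `ConsensusHash`): relay only blocks from recent tenures, and skip blocks already in the staging DB, treating a DB error as "already have" so a faulty DB cannot make the node spam the network.

```rust
use std::collections::HashSet;

fn should_relay(
    recent_tenures: &HashSet<u64>,
    tenure: u64,
    already_have: Result<bool, ()>,
    force_send: bool,
) -> bool {
    if !recent_tenures.contains(&tenure) {
        return false; // stale tenure: IBD nodes will download it instead
    }
    if !force_send && already_have.unwrap_or(true) {
        return false; // don't re-push blocks we already stored
    }
    true
}

fn main() {
    let recent: HashSet<u64> = [7, 8, 9].into_iter().collect();
    assert!(should_relay(&recent, 9, Ok(false), false));
    assert!(!should_relay(&recent, 3, Ok(false), false)); // too old
    assert!(!should_relay(&recent, 9, Err(()), false)); // fail closed on DB error
}
```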
+ if !force_send + && chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&blk.block_id()) + .unwrap_or(true) + { + return false; + } + true + }) + .collect(); + + debug!( + "{:?}: Forward {} Nakamoto blocks from {:?}", + _local_peer, + relay_blocks.len(), + &relayers + ); + + if relay_blocks.len() == 0 { + continue; + } + + for _block in relay_blocks.iter() { + test_debug!( + "{:?}: Forward Nakamoto block {}/{}", + _local_peer, + &_block.header.consensus_hash, + &_block.header.block_hash() ); } - for (relayers, tx) in new_txs.into_iter() { - debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); - mempool_txs_added.push(tx.clone()); - let msg = StacksMessageType::Transaction(tx); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast transaction: {:?}", &e); - } + let msg = StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: relay_blocks, + }); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); + } + } + } + + #[cfg_attr(test, mutants::skip)] + /// Process epoch3 data + /// Relay new nakamoto blocks if not in ibd + /// Returns number of new nakamoto blocks, up to u64::MAX + pub fn process_new_epoch3_blocks( + &mut self, + local_peer: &LocalPeer, + network_result: &mut NetworkResult, + burnchain: &Burnchain, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + ) -> u64 { + let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( + network_result, + burnchain, + sortdb, + chainstate, + coord_comms, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to process new Nakamoto blocks: {:?}", &e); + return 0; + } + }; + + let num_new_nakamoto_blocks = accepted_blocks + .iter() + .fold(0, |acc, accepted| acc + accepted.blocks.len()) + .try_into() + .unwrap_or(u64::MAX); // don't panic if we somehow receive more than u64::MAX blocks + + // punish bad peers + if bad_neighbors.len() > 0 { + debug!("{:?}: Ban {} peers", &local_peer, bad_neighbors.len()); + if let Err(e) = self.p2p.ban_peers(bad_neighbors) { + warn!("Failed to ban bad-block peers: {:?}", &e); + } + } + + // relay if not IBD + if !ibd && accepted_blocks.len() > 0 { + self.relay_epoch3_blocks(local_peer, sortdb, chainstate, accepted_blocks, false); + } + num_new_nakamoto_blocks + } + + #[cfg_attr(test, mutants::skip)] + /// Process new transactions + /// Returns the list of accepted txs + pub fn process_new_transactions( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Vec { + if ibd { + // don't do anything + return vec![]; + } + + // only care about transaction forwarding if not IBD. 
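A minimal sketch of the accounting in `process_new_epoch3_blocks` above: accepted blocks are summed across all relayer sets, and the `usize` to `u64` conversion saturates at `u64::MAX` rather than panicking on an (absurd) overflow.

```rust
fn count_accepted_blocks(accepted: &[Vec<&'static str>]) -> u64 {
    accepted
        .iter()
        .fold(0usize, |acc, set| acc.saturating_add(set.len()))
        .try_into()
        .unwrap_or(u64::MAX) // never panic on the usize -> u64 conversion
}

fn main() {
    let accepted = vec![vec!["blk-1", "blk-2"], vec!["blk-3"]];
    assert_eq!(count_accepted_blocks(&accepted), 3);
}
```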
+ // store all transactions, and forward the novel ones to neighbors + let mut mempool_txs_added = vec![]; + test_debug!( + "{:?}: Process {} transaction(s)", + &_local_peer, + network_result.pushed_transactions.len() + ); + let new_txs = Relayer::process_transactions( + network_result, + sortdb, + chainstate, + mempool, + event_observer.map(|obs| obs.as_mempool_event_dispatcher()), + ) + .unwrap_or(vec![]); + + if new_txs.len() > 0 { + debug!( + "{:?}: Send {} transactions to neighbors", + &_local_peer, + new_txs.len() + ); + } + + for (relayers, tx) in new_txs.into_iter() { + debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); + mempool_txs_added.push(tx.clone()); + let msg = StacksMessageType::Transaction(tx); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast transaction: {:?}", &e); } } + mempool_txs_added + } + + /// Given a network result, consume and store all data. + /// * Add all blocks and microblocks to staging. + /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks + /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams + /// * Forward along unconfirmed microblocks that we didn't already have + /// * Add all transactions to the mempool. + /// * Forward transactions we didn't already have. + /// * Reload the unconfirmed state, if necessary. + /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and + /// turned into peer bans. + pub fn process_network_result( + &mut self, + local_peer: &LocalPeer, + network_result: &mut NetworkResult, + burnchain: &Burnchain, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Result { + // process epoch2 data + let (num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks) = self + .process_new_epoch2_blocks( + local_peer, + network_result, + sortdb, + chainstate, + ibd, + coord_comms, + ); + + // process epoch3 data + let num_new_nakamoto_blocks = self.process_new_epoch3_blocks( + local_peer, + network_result, + burnchain, + sortdb, + chainstate, + ibd, + coord_comms, + ); - let mut processed_unconfirmed_state = Default::default(); + // process transactions + let mempool_txs_added = self.process_new_transactions( + local_peer, + network_result, + sortdb, + chainstate, + mempool, + ibd, + event_observer, + ); // finally, refresh the unconfirmed chainstate, if need be. 
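A hedged sketch of the transaction path in `process_new_transactions`: each novel transaction is recorded for the receipt and then broadcast with its relay hints, and a failed broadcast is logged rather than treated as fatal. The closure stands in for the p2p channel.

```rust
fn broadcast_new_txs<T: Clone, E: std::fmt::Debug>(
    new_txs: Vec<(Vec<String>, T)>,
    mut broadcast: impl FnMut(Vec<String>, T) -> Result<(), E>,
) -> Vec<T> {
    let mut mempool_txs_added = vec![];
    for (relayers, tx) in new_txs {
        mempool_txs_added.push(tx.clone());
        if let Err(e) = broadcast(relayers, tx) {
            // A send failure should not abort processing of the other txs.
            eprintln!("Failed to broadcast transaction: {e:?}");
        }
    }
    mempool_txs_added
}

fn main() {
    let added = broadcast_new_txs(
        vec![(vec![], "tx-1"), (vec!["peer-a".into()], "tx-2")],
        |_relayers, _tx| Ok::<(), ()>(()),
    );
    assert_eq!(added.len(), 2);
}
```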
// only bother if we're not in IBD; otherwise this is a waste of time - if network_result.has_microblocks() && !ibd { - processed_unconfirmed_state = Relayer::refresh_unconfirmed(chainstate, sortdb); - } + let processed_unconfirmed_state = if network_result.has_microblocks() && !ibd { + Relayer::refresh_unconfirmed(chainstate, sortdb) + } else { + Default::default() + }; // push events for HTTP-uploaded stacker DB chunks Relayer::process_uploaded_stackerdb_chunks( @@ -2192,6 +2817,7 @@ impl Relayer { num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks, + num_new_nakamoto_blocks, }; Ok(receipts) @@ -2609,3677 +3235,19 @@ impl PeerNetwork { } } - for (nk, txs) in network_result.pushed_transactions.iter() { - for (_, tx) in txs.iter() { - self.relayer_stats.add_relayed_message((*nk).clone(), tx); + for (nk, nakamoto_data) in network_result.pushed_nakamoto_blocks.iter() { + for (_, nakamoto_msg) in nakamoto_data.iter() { + for nakamoto_block in nakamoto_msg.blocks.iter() { + self.relayer_stats + .add_relayed_message((*nk).clone(), nakamoto_block); + } } } - } -} - -#[cfg(test)] -pub mod test { - use std::cell::RefCell; - use std::collections::HashMap; - - use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; - use clarity::vm::ast::ASTRules; - use clarity::vm::costs::LimitedCostTracker; - use clarity::vm::database::ClarityDatabase; - use clarity::vm::types::QualifiedContractIdentifier; - use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; - use stacks_common::address::AddressHashMode; - use stacks_common::types::chainstate::{ - BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash, - }; - use stacks_common::types::Address; - use stacks_common::util::hash::MerkleTree; - use stacks_common::util::sleep_ms; - use stacks_common::util::vrf::VRFProof; - - use super::*; - use crate::burnchains::tests::TestMiner; - use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; - use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; - use crate::chainstate::stacks::test::codec_all_transactions; - use crate::chainstate::stacks::tests::{ - make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, - make_user_stacks_transfer, - }; - use crate::chainstate::stacks::{Error as ChainstateError, *}; - use crate::clarity_vm::clarity::ClarityConnection; - use crate::core::*; - use crate::net::api::getinfo::RPCPeerInfoData; - use crate::net::asn::*; - use crate::net::chat::*; - use crate::net::codec::*; - use crate::net::download::*; - use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; - use crate::net::httpcore::StacksHttpMessage; - use crate::net::inv::inv2x::*; - use crate::net::test::*; - use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; - use crate::net::*; - use crate::util_lib::test::*; - - #[test] - fn test_relayer_stats_add_relyed_messages() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - eprintln!("Test with {} transactions", all_transactions.len()); - - let nk = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - // never overflow recent messages for a neighbor - for (i, tx) in 
all_transactions.iter().enumerate() { - relay_stats.add_relayed_message(nk.clone(), tx); - - assert_eq!(relay_stats.recent_messages.len(), 1); - assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); - - assert_eq!(relay_stats.recent_updates.len(), 1); - } - - assert_eq!( - relay_stats.recent_messages.get(&nk).unwrap().len(), - MAX_RECENT_MESSAGES - ); - for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { - let digest = all_transactions[i].get_digest(); - let mut found = false; - for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { - found = found || (*hash == digest); - } - if !found { - assert!(false); + for (nk, txs) in network_result.pushed_transactions.iter() { + for (_, tx) in txs.iter() { + self.relayer_stats.add_relayed_message((*nk).clone(), tx); } } - - // never overflow number of neighbors tracked - for i in 0..(MAX_RELAYER_STATS + 1) { - let mut new_nk = nk.clone(); - new_nk.peer_version += i as u32; - - relay_stats.add_relayed_message(new_nk, &all_transactions[0]); - - assert!(relay_stats.recent_updates.len() <= i + 1); - assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); - } - } - - #[test] - fn test_relayer_merge_stats() { - let mut relayer_stats = RelayerStats::new(); - - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - public_key_hash: Hash160([0u8; 20]), - }; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: 1, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let now = get_epoch_time_secs() + 60; - - let relay_stats_2 = RelayStats { - num_messages: 2, - num_bytes: 2, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_2.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let relay_stats_3 = RelayStats { - num_messages: 3, - num_bytes: 3, - last_seen: 0, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_3.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - for i in 0..(MAX_RELAYER_STATS + 1) { - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 14321 + (i as u16), - public_key_hash: Hash160([0u8; 20]), - }; - - let now = 
get_epoch_time_secs() + (i as u64) + 1; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); - } - } - - #[test] - fn test_relay_inbound_peer_rankings() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54322, - }; - - let nk_3 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54323, - }; - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 0); - - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 1); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 2); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - assert_eq!(*dups.get(&nk_2).unwrap(), 4); - - // total dups == 7 - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 0, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); - - // high warmup period - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 100, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); - } - - #[test] - fn test_relay_outbound_peer_rankings() { - let relay_stats = RelayerStats::new(); - - let asn1 = ASEntry4 { - prefix: 0x10000000, - mask: 8, - asn: 1, - org: 1, - }; - - let asn2 = ASEntry4 { - prefix: 0x20000000, - mask: 8, - asn: 2, - org: 2, - }; - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, - ]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, - ]), - port: 54322, - }; - - let nk_3 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, - ]), - port: 54323, - }; - - let n1 = Neighbor { - addr: nk_1.clone(), - public_key: Secp256k1PublicKey::from_hex( - "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 1, - org: 1, - in_degree: 0, - out_degree: 0, - }; - - let n2 = Neighbor { - addr: nk_2.clone(), - public_key: Secp256k1PublicKey::from_hex( - "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let n3 = Neighbor { - addr: nk_3.clone(), - public_key: Secp256k1PublicKey::from_hex( - "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let peerdb = PeerDB::connect_memory( - 0x80000000, - 0, - 4032, - UrlString::try_from("http://foo.com").unwrap(), - &vec![asn1, asn2], - &vec![n1.clone(), n2.clone(), n3.clone()], - ) - .unwrap(); - - let asn_count = RelayerStats::count_ASNs( - peerdb.conn(), - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - ) - .unwrap(); - assert_eq!(asn_count.len(), 3); - assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); - assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); - assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); - - let ranking = relay_stats - .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 3); - assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); - assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); - - let ranking = relay_stats - .get_outbound_relay_rankings(&peerdb, &vec![nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 2); - assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_3_peers_push_available() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_3_peers_push_available", - 4200, - 3, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 3); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 1 downloads the blocks from peer 0, and sends - // BlocksAvailable and MicroblocksAvailable messages to - // peer 2. - peer_configs[1].connection_opts.disable_chat_neighbors = true; - - // peer 2 learns about the blocks and microblocks from peer 1's - // BlocksAvaiable and MicroblocksAvailable messages, but - // not from inv syncs. 
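The deleted assertions above are consistent with an inbound ranking rule of `rank(nk) = max(total_dups - dups(nk), warmup) + 1`, so the peers that relayed a message to us most often score lowest. A reconstruction under that assumption, with `&str` keys standing in for `NeighborKey`:

```rust
use std::collections::HashMap;

fn inbound_relay_rankings(
    neighbors: &[&'static str],
    dups: &HashMap<&'static str, usize>,
    warmup: usize,
) -> HashMap<&'static str, usize> {
    let total: usize = dups.values().sum();
    neighbors
        .iter()
        .map(|nk| {
            let d = dups.get(nk).copied().unwrap_or(0);
            // Fewer duplicates received from a peer => higher rank for it.
            (*nk, total.saturating_sub(d).max(warmup) + 1)
        })
        .collect()
}

fn main() {
    let dups: HashMap<_, _> = [("nk_1", 3), ("nk_2", 4)].into_iter().collect();
    let dist = inbound_relay_rankings(&["nk_1", "nk_2", "nk_3"], &dups, 0);
    assert_eq!(dist["nk_1"], 7 - 3 + 1);
    assert_eq!(dist["nk_2"], 7 - 4 + 1);
    assert_eq!(dist["nk_3"], 7 + 1);
}
```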
- peer_configs[2].connection_opts.disable_chat_neighbors = true; - peer_configs[2].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - peer_configs[2].connection_opts.disable_natpunch = true; - - // do not push blocks and microblocks; only announce them - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[2].connection_opts.disable_block_push = true; - - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - peer_configs[2].connection_opts.disable_microblock_push = true; - - // generous timeouts - peer_configs[0].connection_opts.connect_timeout = 180; - peer_configs[1].connection_opts.connect_timeout = 180; - peer_configs[2].connection_opts.connect_timeout = 180; - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - peer_configs[2].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - let peer_2 = peer_configs[2].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - peer_configs[2].add_neighbor(&peer_1); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - block_data - }, - |ref mut peers| { - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. 
This is required for the downloader to - // work, and for (Micro)BlocksAvailable messages to be accepted - let peer_1_nk = peers[1].to_neighbor().addr; - let peer_2_nk = peers[2].to_neighbor().addr; - let bc = peers[1].config.burnchain.clone(); - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { - stats.scans = 1; - stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); - stats.inv.merge_blocks_inv( - 0, - 30, - vec![0, 0, 0, 0, 0], - vec![0, 0, 0, 0, 0], - false, - ); - } else { - panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); - } - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - - inv_state - .get_stats_mut(&peer_1_nk) - .unwrap() - .inv - .num_reward_cycles = this_reward_cycle; - inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = - vec![0x3f]; - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 2"); - } - } - - // peer 2 should never see a BlocksInv - // message. 
That would imply it asked for an inv - for (_, convo) in peers[2].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) } - - fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - return false; - } - }; - - match peer.network.peers.get(&event_id) { - Some(convo) => { - return convo.is_authenticated(); - } - None => { - return false; - } - } - } - - fn push_message( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - panic!("Unreachable peer: {:?}", dest); - } - }; - - let relay_msg = match peer.network.peers.get_mut(&event_id) { - Some(convo) => convo - .sign_relay_message( - &peer.network.local_peer, - &peer.network.chain_view, - relay_hints, - msg, - ) - .unwrap(), - None => { - panic!("No such event ID {} from neighbor {}", event_id, dest); - } - }; - - match peer.network.relay_signed_message(dest, relay_msg.clone()) { - Ok(_) => { - return true; - } - Err(net_error::OutboxOverflow) => { - test_debug!( - "{:?} outbox overflow; try again later", - &peer.to_neighbor().addr - ); - return false; - } - Err(net_error::SendError(msg)) => { - warn!( - "Failed to send to {:?}: SendError({})", - &peer.to_neighbor().addr, - msg - ); - return false; - } - Err(e) => { - test_debug!( - "{:?} encountered fatal error when forwarding: {:?}", - &peer.to_neighbor().addr, - &e - ); - assert!(false); - unreachable!(); - } - } - } - - fn http_rpc( - peer_http: u16, - request: StacksHttpRequest, - ) -> Result { - use std::net::TcpStream; - - let mut sock = TcpStream::connect( - &format!("127.0.0.1:{}", peer_http) - .parse::() - .unwrap(), - ) - .unwrap(); - - let request_bytes = request.try_serialize().unwrap(); - match sock.write_all(&request_bytes) { - Ok(_) => {} - Err(e) => { - test_debug!("Client failed to write: {:?}", &e); - return Err(net_error::WriteError(e)); - } - } - - let mut resp = vec![]; - match sock.read_to_end(&mut resp) { - Ok(_) => { - if resp.len() == 0 { - test_debug!("Client did not receive any data"); - return Err(net_error::PermanentlyDrained); - } - } - Err(e) => { - test_debug!("Client failed to read: {:?}", &e); - return Err(net_error::ReadError(e)); - } - } - - test_debug!("Client received {} bytes", resp.len()); - let response = StacksHttp::parse_response( - &request.preamble().verb, - &request.preamble().path_and_query_str, - &resp, - ) - .unwrap(); - match response { - StacksHttpMessage::Response(x) => Ok(x), - _ => { - panic!("Did not receive a Response"); - } - } - } - - fn broadcast_message( - broadcaster: &mut TestPeer, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let request = NetworkRequest::Broadcast(relay_hints, msg); - match broadcaster.network.dispatch_request(request) { - Ok(_) => true, - Err(e) => { - error!("Failed to broadcast: {:?}", &e); - false - } - } - } - - fn push_block( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Push block {}/{} to {:?}", - peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - dest - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - 
&consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_block( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Broadcast block {}/{}", - peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_microblocks( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: Push {} microblocksblock {}/{} to {:?}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - dest - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_microblocks( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: broadcast {} microblocksblock {}/{}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_transaction( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - tx: StacksTransaction, - ) -> bool { - test_debug!( - "{:?}: Push tx {} to {:?}", - peer.to_neighbor().addr, - tx.txid(), - dest - ); - let msg = StacksMessageType::Transaction(tx); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_transaction( - peer: &mut TestPeer, - relay_hints: Vec, - tx: StacksTransaction, - ) -> bool { - test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); - let msg = StacksMessageType::Transaction(tx); - broadcast_message(peer, relay_hints, msg) - } - - fn http_get_info(http_port: u16) -> RPCPeerInfoData { - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "GET".to_string(), - "/v2/info".to_string(), - ); - request.keep_alive = false; - let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); - let response = http_rpc(http_port, getinfo).unwrap(); - let peer_info = response.decode_peer_info().unwrap(); - peer_info - } - - fn http_post_block( - http_port: u16, - consensus_hash: &ConsensusHash, - block: &StacksBlock, - ) -> bool { - test_debug!( - "upload block {}/{} to localhost:{}", - consensus_hash, - block.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/blocks".to_string(), - ); - request.keep_alive = false; - let post_block = - 
StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); - - let response = http_rpc(http_port, post_block).unwrap(); - let accepted = response.decode_stacks_block_accepted().unwrap(); - accepted.accepted - } - - fn http_post_microblock( - http_port: u16, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - mblock: &StacksMicroblock, - ) -> bool { - test_debug!( - "upload microblock {}/{}-{} to localhost:{}", - consensus_hash, - block_hash, - mblock.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/microblocks".to_string(), - ); - request.keep_alive = false; - let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let post_microblock = StacksHttpRequest::new( - request, - HttpRequestContents::new() - .payload_stacks(mblock) - .for_specific_tip(tip), - ); - - let response = http_rpc(http_port, post_microblock).unwrap(); - let payload = response.get_http_payload_ok().unwrap(); - let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); - return true; - } - - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( - outbound_test: bool, - disable_push: bool, - ) { - with_timeout(600, move || { - let original_blocks_and_microblocks = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0); - let sent_blocks = RefCell::new(false); - let sent_microblocks = RefCell::new(false); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", - 4210, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks and pushes them to peer 1 - // peer 1 receives the blocks and microblocks. It - // doesn't download them, nor does it try to get invs - peer_configs[0].connection_opts.disable_block_advertisement = true; - - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // force usage of blocksavailable/microblocksavailable? - if disable_push { - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - } - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - - if outbound_test { - // neighbor relationship is symmetric -- peer 1 has an outbound connection - // to peer 0. 
- peer_configs[1].add_neighbor(&peer_0); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec)> = - block_data - .clone() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); - *original_blocks_and_microblocks.borrow_mut() = saved_copy; - block_data - }, - |ref mut peers| { - if !disable_push { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - } - - // make sure peer 1's inv has an entry for peer 0, even - // though it's not doing an inv sync. This is required for the downloader to - // work - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk.clone(), true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - if is_peer_connected(&peers[0], &peer_1_nk) { - // randomly push a block and/or microblocks to peer 1. 
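A hedged distillation of the retry logic used by this test harness: each block/microblock pair is pushed as a two-step state machine, and the cursor only advances once both pushes succeed, so a full outbox simply causes a retry on the next poll.

```rust
struct PushState {
    sent_block: bool,
    sent_microblocks: bool,
}

impl PushState {
    fn new() -> Self {
        PushState { sent_block: false, sent_microblocks: false }
    }

    /// `push` returns false when the message could not be sent (e.g. outbox
    /// overflow); the same item is retried on the next poll.
    fn step(&mut self, mut push: impl FnMut(&str) -> bool) -> bool {
        if !self.sent_block {
            self.sent_block = push("block");
        }
        if self.sent_block && !self.sent_microblocks {
            self.sent_microblocks = push("microblocks");
        }
        let done = self.sent_block && self.sent_microblocks;
        if done {
            *self = PushState::new(); // reset for the next block in the list
        }
        done
    }
}

fn main() {
    let mut state = PushState::new();
    // First poll: the block sends but the microblocks hit a full outbox.
    let mut fail_microblocks = |what: &str| what == "block";
    assert!(!state.step(&mut fail_microblocks));
    // Second poll: the block is already sent, so only microblocks are retried.
    assert!(state.step(|_| true));
}
```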
- let mut block_data = blocks_and_microblocks.borrow_mut(); - let original_block_data = original_blocks_and_microblocks.borrow(); - let mut next_idx = idx.borrow_mut(); - let data_to_push = { - if block_data.len() > 0 { - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } else { - // start over (can happen if a message gets - // dropped due to a timeout) - test_debug!("Reset block transmission (possible timeout)"); - *block_data = (*original_block_data).clone(); - *next_idx = thread_rng().gen::() % block_data.len(); - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } - }; - - if let Some((consensus_hash, block, microblocks)) = data_to_push { - test_debug!( - "Push block {}/{} and microblocks", - &consensus_hash, - block.block_hash() - ); - - let block_hash = block.block_hash(); - let mut sent_blocks = sent_blocks.borrow_mut(); - let mut sent_microblocks = sent_microblocks.borrow_mut(); - - let pushed_block = if !*sent_blocks { - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash.clone(), - block, - ) - } else { - true - }; - - *sent_blocks = pushed_block; - - if pushed_block { - let pushed_microblock = if !*sent_microblocks { - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash, - block_hash, - microblocks, - ) - } else { - true - }; - - *sent_microblocks = pushed_microblock; - - if pushed_block && pushed_microblock { - block_data.remove(*next_idx); - if block_data.len() > 0 { - *next_idx = thread_rng().gen::() % block_data.len(); - } - *sent_blocks = false; - *sent_microblocks = false; - } - } - test_debug!("{} blocks/microblocks remaining", block_data.len()); - } - } - - // peer 0 should never see a GetBlocksInv message. - // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. 
- // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_upload_blocks_http() { - with_timeout(600, || { - let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); - let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); - - std::thread::spawn(move || loop { - eprintln!("Get port"); - let remote_port: u16 = port_rx.recv().unwrap(); - eprintln!("Got port {}", remote_port); - - eprintln!("Send getinfo"); - let peer_info = http_get_info(remote_port); - eprintln!("Got getinfo! {:?}", &peer_info); - let idx = peer_info.stacks_tip_height as usize; - - eprintln!("Get blocks and microblocks"); - let blocks_and_microblocks: Vec<( - ConsensusHash, - Option, - Option>, - )> = block_rx.recv().unwrap(); - eprintln!("Got blocks and microblocks!"); - - if idx >= blocks_and_microblocks.len() { - eprintln!("Out of blocks to send!"); - return; - } - - eprintln!( - "Upload block {}", - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() - ); - http_post_block( - remote_port, - &blocks_and_microblocks[idx].0, - blocks_and_microblocks[idx].1.as_ref().unwrap(), - ); - for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { - eprintln!("Upload microblock {}", mblock.block_hash()); - http_post_microblock( - remote_port, - &blocks_and_microblocks[idx].0, - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), - mblock, - ); - } - }); - - let original_blocks_and_microblocks = RefCell::new(vec![]); - let port_sx_cell = RefCell::new(port_sx); - let block_sx_cell = RefCell::new(block_sx); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_upload_blocks_http", - 4250, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 0 sends them to peer 1 - peer_configs[1].connection_opts.disable_chat_neighbors = true; - peer_configs[1].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // generous timeouts - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. 
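A minimal sketch of the blocking round trip the deleted `http_rpc` helper performs: connect, write the serialized request, read until the server closes the socket (the tests set `keep_alive = false`), and hand the raw bytes to the HTTP parser. The canned server and request bytes below are illustrative only.

```rust
use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};

fn raw_http_roundtrip(port: u16, request_bytes: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut sock = TcpStream::connect(("127.0.0.1", port))?;
    sock.write_all(request_bytes)?;
    let mut resp = Vec::new();
    sock.read_to_end(&mut resp)?; // returns once the peer closes the socket
    if resp.is_empty() {
        // Mirrors the helper's PermanentlyDrained case: the peer sent nothing.
        return Err(std::io::Error::new(
            std::io::ErrorKind::UnexpectedEof,
            "no response",
        ));
    }
    Ok(resp)
}

fn main() -> std::io::Result<()> {
    // Tiny stand-in server so the sketch runs end to end.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let port = listener.local_addr()?.port();
    std::thread::spawn(move || {
        if let Ok((mut sock, _)) = listener.accept() {
            let mut buf = [0u8; 1024];
            let _ = sock.read(&mut buf);
            let _ = sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok");
        }
    });
    let req = b"GET /v2/info HTTP/1.1\r\nHost: x\r\nConnection: close\r\n\r\n";
    let resp = raw_http_roundtrip(port, req)?;
    assert!(resp.starts_with(b"HTTP/1.1 200"));
    Ok(())
}
```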
- let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); - - block_data - }, - |ref mut peers| { - let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); - let remote_port = peers[1].config.http_port; - - let port_sx = port_sx_cell.borrow_mut(); - let block_sx = block_sx_cell.borrow_mut(); - - let _ = (*port_sx).try_send(remote_port); - let _ = (*block_sx).try_send(blocks_and_microblocks); - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) - } - - fn make_test_smart_contract_transaction( - peer: &mut TestPeer, - name: &str, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> StacksTransaction { - // make a smart contract - let contract = " - (define-data-var bar int 0) - (define-public (get-bar) (ok (var-get bar))) - (define-public (set-bar (x int) (y int)) - (begin (var-set bar (/ x y)) (ok (var-get bar))))"; - - let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); - - let tx_contract = peer - .with_mining_state( - |ref mut sortdb, ref mut miner, ref mut spending_account, ref mut stacks_node| { - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_account.as_transaction_auth().unwrap().into(), - TransactionPayload::new_smart_contract( - &name.to_string(), - &contract.to_string(), - None, - ) - .unwrap(), - ); - - let chain_tip = - StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let cur_nonce = stacks_node - .chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }) - .unwrap(); - - test_debug!( - "Nonce of {:?} is {} at {}/{}", - &spending_account.origin_address().unwrap(), - cur_nonce, - consensus_hash, - block_hash - ); - - // spending_account.set_nonce(cur_nonce + 1); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - spending_account.sign_as_origin(&mut tx_signer); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - - test_debug!( - "make transaction {:?} off of {:?}/{:?}: {:?}", - &tx_contract_signed.txid(), - consensus_hash, - block_hash, - &tx_contract_signed - ); - - Ok(tx_contract_signed) - }, - ) - .unwrap(); - - tx_contract - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_transactions() { - 
with_timeout(600, || { - let blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_transactions", - 4220, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 generates blocks and microblocks, and pushes - // them to peer 1. Peer 0 also generates transactions - // and pushes them to peer 1. - peer_configs[0].connection_opts.disable_block_advertisement = true; - - // let peer 0 drive this test, as before, by controlling - // when peer 1 sees blocks. - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - peer_configs[0].connection_opts.outbox_maxlen = 100; - peer_configs[1].connection_opts.inbox_maxlen = 100; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - let initial_balances = vec![ - ( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ( - PrincipalData::from( - peer_configs[1].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ]; - - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances.clone(); - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for b in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - if b == 0 { - // prime with first block - peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]); - } - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) 
- .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - // peers must be connected to each other - let mut peer_0_to_1 = false; - let mut peer_1_to_0 = false; - for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } - } - None => {} - } - } - for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } - } - None => {} - } - } - - if !peer_0_to_1 || !peer_1_to_0 { - test_debug!( - "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}", - peer_0_to_1, - peer_1_to_0 - ); - return; - } - - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. - match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk, true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let done_flag = *done.borrow(); - if is_peer_connected(&peers[0], &peer_1_nk) { - // only submit the next transaction if the previous - // one is accepted - let has_last_transaction = { - let expected_txs: std::cell::Ref<'_, Vec<StacksTransaction>> = - sent_txs.borrow(); - if let Some(tx) = (*expected_txs).last() { - let txid = tx.txid(); - if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) { - debug!("Peer 1 still waiting for transaction {}", &txid); - push_transaction( - &mut peers[0], - &peer_1_nk, - vec![], - (*tx).clone(), - ); - false - } else { - true - } - } else { - true - } - }; - - if has_last_transaction { - // push blocks and microblocks in order, and push a - // transaction that can only be validated once the - // block and microblocks are processed. 
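Editor's note: the `has_last_transaction` logic above implements a simple pacing discipline: re-push the most recent transaction until the remote mempool reports it, and only then advance to the next one. A stripped-down model of that discipline follows; `Pacer`, `has_tx`, and `push` are hypothetical stand-ins for the test's mempool query and push helpers, not names from this diff.

/// Sketch of the pacing pattern: only advance once the previously
/// pushed item is visible on the remote side; otherwise re-push it.
struct Pacer<T> {
    sent: Vec<T>,
}

impl<T: Clone> Pacer<T> {
    fn new() -> Self {
        Pacer { sent: Vec::new() }
    }

    fn step<F, G>(&mut self, mut has_tx: F, mut push: G, next: T)
    where
        F: FnMut(&T) -> bool,
        G: FnMut(T),
    {
        if let Some(last) = self.sent.last() {
            if !has_tx(last) {
                // the remote peer has not accepted the last push yet;
                // re-send it instead of advancing
                push(last.clone());
                return;
            }
        }
        push(next.clone());
        self.sent.push(next);
    }
}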
- let ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - idx, - ) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - - let microblocks = block_data[*idx].2.clone(); - let microblocks_consensus_hash = block_data[*idx].0.clone(); - let microblocks_block_hash = block_data[*idx].1.block_hash(); - - *idx += 1; - if *idx >= block_data.len() { - *idx = 1; - } - - let block = block_data[*idx].1.clone(); - let block_consensus_hash = block_data[*idx].0.clone(); - ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - *idx, - ) - }; - - if !done_flag { - test_debug!( - "Push microblocks built by {}/{} (idx={})", - &microblocks_consensus_hash, - &microblocks_block_hash, - idx - ); - - let block_hash = block.block_hash(); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ); - - test_debug!( - "Push block {}/{} and microblocks (idx = {})", - &block_consensus_hash, - block.block_hash(), - idx - ); - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - block_consensus_hash.clone(), - block, - ); - - // create a transaction against the resulting - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &block_consensus_hash, - &block_hash, - ); - - // push or post - push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone()); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx); - } else { - test_debug!("Done pushing data"); - } - } - } - - // peer 0 should never see a GetBlocksInv message. - // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |ref mut peers| { - // all blocks downloaded. 
only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let txs = - MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - test_debug!("Peer 1 has {} txs", txs.len()); - txs.len() == sent_txs.borrow().len() - }, - ); - - // peer 1 should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - let expected_txs = sent_txs.into_inner(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peer 1 should have 1 tx per chain tip - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs.iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[1].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - test_debug!( - "Check {}/{} (height {}): expect {}", - &consensus_hash, - &block_hash, - block.header.total_work.work, - &sent_tx.txid() - ); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_peers_broadcast() { - with_timeout(600, || { - let blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - let num_peers = 3; - let privk = StacksPrivateKey::new(); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_peers_broadcast", - 4230, - num_peers, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), num_peers); - - // peer 0 generates blocks and microblocks, and pushes - // them to peers 1..n. Peer 0 also generates transactions - // and broadcasts them to the network. - - peer_configs[0].connection_opts.disable_inv_sync = true; - peer_configs[0].connection_opts.disable_inv_chat = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state. 
- for i in 0..peer_configs.len() { - peer_configs[i].connection_opts.disable_natpunch = true; - peer_configs[i].connection_opts.disable_network_prune = true; - peer_configs[i].connection_opts.timeout = 600; - peer_configs[i].connection_opts.connect_timeout = 600; - - // do one walk - peer_configs[i].connection_opts.num_initial_walks = 0; - peer_configs[i].connection_opts.walk_retry_count = 0; - peer_configs[i].connection_opts.walk_interval = 600; - - // don't throttle downloads - peer_configs[i].connection_opts.download_interval = 0; - peer_configs[i].connection_opts.inv_sync_interval = 0; - - let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks; - peer_configs[i].connection_opts.max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.soft_max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64; - peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64; - } - - let initial_balances = vec![( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - )]; - - for i in 0..peer_configs.len() { - peer_configs[i].initial_balances = initial_balances.clone(); - } - - // connectivity - let peer_0 = peer_configs[0].to_neighbor(); - for i in 1..peer_configs.len() { - peer_configs[i].add_neighbor(&peer_0); - let peer_i = peer_configs[i].to_neighbor(); - peer_configs[0].add_neighbor(&peer_i); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) 
- .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let done_flag = *done.borrow(); - - let mut connectivity_0_to_n = HashSet::new(); - let mut connectivity_n_to_0 = HashSet::new(); - - let peer_0_nk = peers[0].to_neighbor().addr; - - for (nk, event_id) in peers[0].network.events.iter() { - if let Some(convo) = peers[0].network.peers.get(event_id) { - if convo.is_authenticated() { - connectivity_0_to_n.insert(nk.clone()); - } - } - } - for i in 1..peers.len() { - for (nk, event_id) in peers[i].network.events.iter() { - if *nk != peer_0_nk { - continue; - } - - if let Some(convo) = peers[i].network.peers.get(event_id) { - if convo.is_authenticated() { - if let Some(inv_state) = &peers[i].network.inv_state { - if let Some(inv_stats) = - inv_state.block_stats.get(&peer_0_nk) - { - if inv_stats.inv.num_reward_cycles >= 5 { - connectivity_n_to_0 - .insert(peers[i].to_neighbor().addr); - } - } - } - } - } - } - } - - if connectivity_0_to_n.len() < peers.len() - 1 - || connectivity_n_to_0.len() < peers.len() - 1 - { - test_debug!( - "Network not connected: 0 --> N = {}, N --> 0 = {}", - connectivity_0_to_n.len(), - connectivity_n_to_0.len() - ); - return; - } - - let ((tip_consensus_hash, tip_block, _), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let idx = blocks_idx.borrow(); - (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) - }; - - if idx > 0 { - let mut caught_up = true; - for i in 1..peers.len() { - peers[i] - .with_db_state(|sortdb, chainstate, relayer, mempool| { - let (canonical_consensus_hash, canonical_block_hash) = - SortitionDB::get_canonical_stacks_chain_tip_hash( - sortdb.conn(), - ) - .unwrap(); - - if canonical_consensus_hash != tip_consensus_hash - || canonical_block_hash != tip_block.block_hash() - { - debug!( - "Peer {} is not caught up yet (at {}/{}, need {}/{})", - i + 1, - &canonical_consensus_hash, - &canonical_block_hash, - &tip_consensus_hash, - &tip_block.block_hash() - ); - caught_up = false; - } - Ok(()) - }) - .unwrap(); - } - if !caught_up { - return; - } - } - - // caught up! 
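Editor's note: the gate just above only lets the broadcaster advance once every follower's canonical Stacks tip matches the last block it pushed. In miniature, with `Tip` and `all_caught_up` as hypothetical stand-ins for the (consensus hash, block hash) pair the test reads via `get_canonical_stacks_chain_tip_hash`:

/// Sketch of the convergence gate: the broadcaster proceeds only when
/// all followers report the expected canonical tip.
#[derive(Clone, Debug, PartialEq)]
struct Tip {
    consensus_hash: [u8; 20],
    block_hash: [u8; 32],
}

fn all_caught_up(expected: &Tip, follower_tips: &[Tip]) -> bool {
    follower_tips.iter().all(|tip| tip == expected)
}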
- // find next block - let ((consensus_hash, block, microblocks), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - if *idx >= block_data.len() { - test_debug!("Out of blocks and microblocks to push"); - return; - } - - let ret = block_data[*idx].clone(); - *idx += 1; - (ret, *idx) - }; - - if !done_flag { - test_debug!( - "Broadcast block {}/{} and microblocks (idx = {})", - &consensus_hash, - block.block_hash(), - idx - ); - - let block_hash = block.block_hash(); - - // create a transaction against the current - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &tip_consensus_hash, - &tip_block.block_hash(), - ); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx.clone()); - - test_debug!( - "Broadcast {}/{} and its microblocks", - &consensus_hash, - &block.block_hash() - ); - // next block - broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); - broadcast_microblocks( - &mut peers[0], - vec![], - consensus_hash, - block_hash, - microblocks, - ); - - // NOTE: first transaction will be dropped since the other nodes haven't - // processed the first-ever Stacks block when their relayer code gets - // around to considering it. - broadcast_transaction(&mut peers[0], vec![], tx); - } else { - test_debug!("Done pushing data"); - } - }, - |ref peer| { - // check peer health -- no message errors - // (i.e. no relay cycles) - for (_, convo) in peer.network.peers.iter() { - assert_eq!(convo.stats.msgs_err, 0); - } - true - }, - |ref mut peers| { - // all blocks downloaded. only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let mut ret = true; - for i in 1..peers.len() { - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()) - .unwrap(); - test_debug!("Peer {} has {} txs", i + 1, txs.len()); - ret = ret && txs.len() == sent_txs.borrow().len() - 1; - } - ret - }, - ); - - // peers 1..n should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - let expected_txs = sent_txs.into_inner(); - - for i in 1..peers.len() { - let txs = - MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peers 1..n should have 1 tx per chain tip (except for the first block) - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[i].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_antientropy() { - with_timeout(600, move || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_antientropy", - 4240, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but does not advertize them nor announce them as - // available via its inventory. 
It only uses its anti-entropy protocol to - // discover that peer 1 doesn't have them, and sends them to peer 1 that way. - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // permit anti-entropy protocol even if nat'ed - peer_configs[0].connection_opts.antientropy_public = true; - peer_configs[1].connection_opts.antientropy_public = true; - peer_configs[0].connection_opts.antientropy_retry = 1; - peer_configs[1].connection_opts.antientropy_retry = 1; - - // make peer 0 go slowly - peer_configs[0].connection_opts.max_block_push = 2; - peer_configs[0].connection_opts.max_microblock_push = 2; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - // cap with an empty sortition, so the antientropy protocol picks up all stacks - // blocks - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(vec![]); - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(vec![]); - } - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push((sn.consensus_hash.clone(), None, None)); - - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn 
test_get_blocks_and_microblocks_2_peers_buffered_messages() { - with_timeout(600, move || { - let sortitions = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0usize); - let pushed_idx = RefCell::new(0usize); - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_buffered_messages", - 4242, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but it does not present its inventory. - peer_configs[0].connection_opts.disable_inv_chat = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // peer 0 ignores peer 1's handshakes - peer_configs[0].connection_opts.disable_inbound_handshakes = true; - - // disable anti-entropy - peer_configs[0].connection_opts.max_block_push = 0; - peer_configs[0].connection_opts.max_microblock_push = 0; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for block_num in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - if block_num == 0 { - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - peers[i].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - } - } else { - let mut all_sortitions = sortitions.borrow_mut(); - all_sortitions.push(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..] - .to_vec() - .drain(..) 
- .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let mut i = idx.borrow_mut(); - let mut pushed_i = pushed_idx.borrow_mut(); - let all_sortitions = sortitions.borrow(); - let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - - if !is_peer_connected(&peers[0], &peer_1_nk) { - debug!("Peer 0 not connected to peer 1"); - return; - } - - if let Some(tip) = tip_opt { - debug!( - "Push at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *pushed_i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *pushed_i as u64 - { - // next block - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - (*all_blocks_and_microblocks)[*pushed_i].1.clone(), - ); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - (*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), - (*all_blocks_and_microblocks)[*pushed_i].2.clone(), - ); - *pushed_i += 1; - } - debug!( - "Sortition at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *i as u64 - { - let event_id = { - let mut ret = 0; - for (nk, event_id) in peers[1].network.events.iter() { - ret = *event_id; - break; - } - if ret == 0 { - return; - } - ret - }; - let mut update_sortition = false; - for (event_id, pending) in peers[1].network.pending_messages.iter() { - debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); - if pending.len() >= 1 { - update_sortition = true; - } - } - if update_sortition { - debug!("Advance sortition!"); - peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); - *i += 1; - } - } - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - pub fn make_contract_tx( - sender: &StacksPrivateKey, - cur_nonce: u64, - tx_fee: u64, - name: &str, - contract: &str, - ) -> StacksTransaction { - let sender_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( - StacksPublicKey::from_private(sender), - ) - .expect("Failed to create p2pkh spending condition from public key."); - - let spending_auth = TransactionAuth::Standard(sender_spending_condition); - - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_auth.clone(), - TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) - .unwrap(), - ); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(tx_fee); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - tx_signer.sign_origin(sender).unwrap(); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - tx_contract_signed - } - - #[test] - fn 
test_static_problematic_tests() { - let spender_sk_1 = StacksPrivateKey::new(); - let spender_sk_2 = StacksPrivateKey::new(); - let spender_sk_3 = StacksPrivateKey::new(); - - let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; - let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); - let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); - - let tx_edge = make_contract_tx( - &spender_sk_1, - 0, - (tx_edge_body.len() * 100) as u64, - "test-edge", - &tx_edge_body, - ); - - // something just over the limit of the expression depth - let exceeds_repeat_factor = edge_repeat_factor + 1; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); - - let tx_exceeds = make_contract_tx( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - "test-exceeds", - &tx_exceeds_body, - ); - - // something stupidly high over the expression depth - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let tx_high = make_contract_tx( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::Typical - ) - .is_ok()); - - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::PrecheckSize - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::PrecheckSize - ) - .is_ok()); - } - - #[test] - fn process_new_blocks_rejects_problematic_asts() { - let privk = StacksPrivateKey::from_hex( - "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", - ) - .unwrap(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&privk)], - ) - .unwrap(); - - let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; - - let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: i64::MAX as u64, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - ]); - let burnchain = peer_config.burnchain.clone(); - - // activate new AST rules right away - let mut peer = 
TestPeer::new(peer_config); - let mut sortdb = peer.sortdb.take().unwrap(); - { - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - peer.sortdb = Some(sortdb); - - let chainstate_path = peer.chainstate_path.clone(); - - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; - let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let bad_tx = make_contract_tx( - &privk, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - let bad_txid = bad_tx.txid(); - let bad_tx_len = { - let mut bytes = vec![]; - bad_tx.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let mblock_privk = StacksPrivateKey::new(); - - // make one tenure with a valid block, but problematic microblocks - let (burn_ops, block, microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let coinbase_tx = make_coinbase(miner, 0); - - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - let block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap() - .0; - - (block, vec![]) - }, - ); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = 
SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - let coinbase_tx = make_coinbase(miner, 0); - - let mblock_privk = miner.next_microblock_privkey(); - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - // this tx would be problematic without our checks - if let Err(ChainstateError::ProblematicTransaction(txid)) = - StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone(), bad_tx.clone()], - ) - { - assert_eq!(txid, bad_txid); - } else { - panic!("Did not get Error::ProblematicTransaction"); - } - - // make a bad block anyway - // don't worry about the state root - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap(); - - let mut bad_block = bad_block.0; - bad_block.txs.push(bad_tx.clone()); - - let txid_vecs = bad_block - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); - bad_block.header.tx_merkle_root = merkle_tree.root(); - - let sort_ic = sortdb.index_conn(); - chainstate - .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) - .unwrap(); - - // make a bad microblock - let mut microblock_builder = StacksMicroblockBuilder::new( - parent_header_hash.clone(), - parent_consensus_hash.clone(), - chainstate, - &sort_ic, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - - // miner should fail with just the bad tx, since it's problematic - let mblock_err = microblock_builder - .mine_next_microblock_from_txs( - vec![(bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap_err(); - if let ChainstateError::NoTransactionsToMine = mblock_err { - } else { - panic!("Did not get NoTransactionsToMine"); - } - - let token_transfer = make_user_stacks_transfer( - &privk, - 0, - 200, - &recipient.to_account_principal(), - 123, - ); - let tt_len = { - let mut bytes = vec![]; - token_transfer.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let mut bad_mblock = microblock_builder - .mine_next_microblock_from_txs( - vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap(); - - // miner shouldn't include the bad tx, since it's problematic - assert_eq!(bad_mblock.txs.len(), 1); - bad_mblock.txs.push(bad_tx.clone()); - - // force it in anyway - let txid_vecs = bad_mblock - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs); - 
bad_mblock.header.tx_merkle_root = merkle_tree.root(); - bad_mblock.sign(&mblock_privk).unwrap(); - - (bad_block, vec![bad_mblock]) - }, - ); - - let bad_mblock = microblocks.pop().unwrap(); - let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]); - - // stuff them all into each possible field of NetworkResult - // p2p messages - let nk = NeighborKey { - peer_version: 1, - network_id: 2, - addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), - port: 19, - }; - let preamble = Preamble { - peer_version: 1, - network_id: 2, - seq: 3, - burn_block_height: 4, - burn_block_hash: BurnchainHeaderHash([5u8; 32]), - burn_stable_block_height: 6, - burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]), - additional_data: 8, - signature: MessageSignature([9u8; 65]), - payload_len: 10, - }; - let bad_msgs = vec![ - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockId::new( - &new_consensus_hash, - &bad_block.block_hash(), - ), - microblocks: vec![bad_mblock.clone()], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Transaction(bad_tx.clone()), - }, - ]; - let mut unsolicited = HashMap::new(); - unsolicited.insert(nk.clone(), bad_msgs.clone()); - - let mut network_result = - NetworkResult::new(0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); - network_result.consume_unsolicited(unsolicited); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - network_result.consume_http_uploads( - bad_msgs - .into_iter() - .map(|msg| msg.payload) - .collect::<Vec<_>>(), - ); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - assert_eq!(network_result.uploaded_transactions.len(), 1); - assert_eq!(network_result.uploaded_blocks.len(), 1); - assert_eq!(network_result.uploaded_microblocks.len(), 1); - assert_eq!(network_result.pushed_transactions.len(), 1); - assert_eq!(network_result.pushed_blocks.len(), 1); - assert_eq!(network_result.pushed_microblocks.len(), 1); - - network_result - .blocks - .push((new_consensus_hash.clone(), bad_block.clone(), 123)); - network_result.confirmed_microblocks.push(( - new_consensus_hash.clone(), - vec![bad_mblock.clone()], - 234, - )); - - let mut sortdb = peer.sortdb.take().unwrap(); - let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = - Relayer::process_new_blocks( - &mut network_result, - &mut sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - None, - ) - .unwrap(); - - // despite this data showing up in all aspects of the network result, none of it actually - // gets relayed - assert_eq!(processed_blocks.len(), 0); - assert_eq!(processed_mblocks.len(), 0); - assert_eq!(relay_mblocks.len(), 0); - assert_eq!(bad_neighbors.len(), 0); - - let txs_relayed = Relayer::process_transactions( - &mut network_result, - &sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - &mut peer.mempool.as_mut().unwrap(), - None, - ) - .unwrap(); - assert_eq!(txs_relayed.len(), 0); - } - - #[test] - fn 
test_block_pay_to_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 0, - Some(PrincipalData::Contract( - QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") - .unwrap(), - )), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block is a pay-to-contract block - // Pay-to-contract should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. 
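Editor's note: the epoch schedule above is what gates the feature under test: burn heights below 28 resolve to Epoch 2.05 and everything at or above 28 to Epoch 2.1, and pay-to-contract coinbases are only admissible in the latter. A minimal sketch of that lookup follows, using only the start heights from the table; the enum and function names are hypothetical, not stacks-core APIs.

/// Sketch of the epoch gate: resolve a burn height against the test's
/// schedule (Epoch 2.05 spans heights 0..28, Epoch 2.1 starts at 28)
/// and allow the feature only in Epoch 2.1 or later.
#[derive(Debug, PartialEq, PartialOrd)]
enum Epoch {
    E2_05,
    E2_1,
}

fn epoch_at(burn_height: u64) -> Epoch {
    if burn_height >= 28 {
        Epoch::E2_1
    } else {
        Epoch::E2_05
    }
}

fn pay_to_contract_allowed(burn_height: u64) -> bool {
    epoch_at(burn_height) >= Epoch::E2_1
}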
- for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid pay-to-contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - 
&snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 0, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx, versioned_contract], - ) - .unwrap(); - - eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block contains a versioned smart contract. - // Versioned smart contracts should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - eprintln!("{:?}", &stacks_block); - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 
0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - let versioned_contract_opt: RefCell<Option<StacksTransaction>> = RefCell::new(None); - let nonce: RefCell<u64> = RefCell::new(0); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let next_nonce = *nonce.borrow(); - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - next_nonce, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - next_nonce + 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - *versioned_contract_opt.borrow_mut() = Some(versioned_contract); - *nonce.borrow_mut() = next_nonce + 1; - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - - // the empty block should be accepted - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Did not accept valid block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - 
peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would reject a versioned contract transaction, since we're not yet at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(MemPoolRejection::Other(msg)) => { - assert!(msg.find("not supported in this epoch").is_some()); - } - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => { - panic!("will_admit_mempool_tx succeeded"); - } - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would accept a versioned contract transaction, since we're now at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // TODO: process bans - // TODO: test sending invalid blocks-available and microblocks-available (should result in a ban) - // TODO: test sending invalid transactions (should result in a ban) - // TODO: test bandwidth limits (sending too much should result in a nack, and then a ban) } diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 2d6651cddad..78b1ff096b2 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -37,12 +37,13 @@ use clarity::vm::{ClarityName, ClarityVersion, ContractName, SymbolicExpression, use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use rand::prelude::*; use rand::thread_rng; -use rusqlite::{DatabaseName, NO_PARAMS}; +use rusqlite::DatabaseName; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::chunked_encoding::*; use stacks_common::util::get_epoch_time_secs; @@ -193,7 +194,7 @@ impl ConversationHttp { /// Is a request in-progress? pub fn is_request_inflight(&self) -> bool { - self.pending_request.is_some() + self.pending_request.is_some() || self.pending_response.is_some() } /// Start a HTTP request from this peer, and expect a response. 
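Editor's note: the `is_request_inflight` hunk above is one of the few functional changes in this diff: a conversation now counts as busy while either the outbound request or its not-yet-consumed response is pending. A minimal model of that invariant follows; the field names mirror the hunk, but the struct itself is a sketch, not the real `ConversationHttp`.

/// Sketch: the conversation stays "inflight" until both slots drain.
struct Conversation<Req, Resp> {
    pending_request: Option<Req>,
    pending_response: Option<Resp>,
}

impl<Req, Resp> Conversation<Req, Resp> {
    fn is_request_inflight(&self) -> bool {
        // Checking only `pending_request` would let a new request start
        // while the previous response is still being drained.
        self.pending_request.is_some() || self.pending_response.is_some()
    }
}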
@@ -553,12 +554,12 @@ impl ConversationHttp { )?; info!("Handled StacksHTTPRequest"; - "verb" => %verb, - "path" => %request_path, - "processing_time_ms" => start_time.elapsed().as_millis(), - "latency_ms" => latency, - "conn_id" => self.conn_id, - "peer_addr" => &self.peer_addr); + "verb" => %verb, + "path" => %request_path, + "processing_time_ms" => start_time.elapsed().as_millis(), + "latency_ms" => latency, + "conn_id" => self.conn_id, + "peer_addr" => &self.peer_addr); if let Some(msg) = msg_opt { ret.push(msg); diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index c920a3cefff..a26fa2f7b41 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -20,6 +20,7 @@ use std::sync::mpsc::{sync_channel, Receiver, RecvError, SendError, SyncSender, use mio::net as mio_net; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use crate::burnchains::{Burnchain, BurnchainView}; @@ -891,7 +892,7 @@ mod test { 1, 0, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::Epoch25); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -924,7 +925,7 @@ mod test { // should be a Block let http_response_bytes = http_response_bytes_res.unwrap(); - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::Epoch25); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -959,7 +960,7 @@ mod test { 10, 0, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -992,7 +993,7 @@ mod test { // should be a Block let http_response_bytes = http_response_bytes_res.unwrap(); - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, @@ -1308,7 +1309,7 @@ mod test { 1, 600, |client_id, ref mut chainstate| { - let peer_server_block = make_codec_test_block(25); + let peer_server_block = make_codec_test_block(25, StacksEpochId::latest()); let peer_server_consensus_hash = ConsensusHash([(client_id + 1) as u8; 20]); let index_block_hash = StacksBlockHeader::make_index_block_hash( &peer_server_consensus_hash, diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index f2d8521ae44..97f8214913a 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -292,6 +292,7 @@ impl StackerDBConfig { contract_id: &QualifiedContractIdentifier, tip: &StacksBlockId, signers: Vec<(StacksAddress, u32)>, + local_max_neighbors: u64, ) -> Result { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; @@ -365,11 +366,12 @@ impl StackerDBConfig { )); } - let max_neighbors = config_tuple + 
let mut max_neighbors = config_tuple .get("max-neighbors") .expect("FATAL: missing 'max-neighbors'") .clone() .expect_u128()?; + if max_neighbors > usize::MAX as u128 { let reason = format!( "Contract {} stipulates a maximum number of neighbors beyond usize::MAX", contract_id @@ -382,6 +384,16 @@ impl StackerDBConfig { )); } + if max_neighbors > u128::from(local_max_neighbors) { + debug!( + "Contract {} stipulates a maximum number of neighbors ({}) beyond locally-configured maximum {}; defaulting to locally-configured maximum", + contract_id, + max_neighbors, + local_max_neighbors, + ); + max_neighbors = u128::from(local_max_neighbors); + } + let hint_replicas_list = config_tuple .get("hint-replicas") .expect("FATAL: missing 'hint-replicas'") @@ -435,7 +447,7 @@ impl StackerDBConfig { )); } - if port < 1024 || port > ((u16::MAX - 1) as u128) { + if port < 1024 || port > u128::from(u16::MAX - 1) { let reason = format!( "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", contract_id ); @@ -446,11 +458,20 @@ impl StackerDBConfig { reason, )); } + // NOTE: port is now known to be in range [1024, 65534] let mut pubkey_hash_slice = [0u8; 20]; pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); + if peer_addr.is_in_private_range() { + debug!( + "Ignoring private IP address '{}' in hint-replicas", + &peer_addr.to_socketaddr(port as u16) + ); + continue; + } + let naddr = NeighborAddress { addrbytes: peer_addr, port: port as u16, @@ -475,6 +496,7 @@ impl StackerDBConfig { chainstate: &mut StacksChainState, sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, + max_neighbors: u64, ) -> Result { let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? @@ -493,7 +515,7 @@ impl StackerDBConfig { let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)?
.expect("FATAL: no epoch defined"); - let dbconn = sortition_db.index_conn(); + let dbconn = sortition_db.index_handle_at_block(chainstate, &chain_tip_hash)?; // check the target contract let res = chainstate.with_read_only_clarity_tx(&dbconn, &chain_tip_hash, |clarity_tx| { @@ -533,16 +555,30 @@ impl StackerDBConfig { reason, )); } else if let Some(Err(e)) = res { - warn!( - "Could not use contract {} for StackerDB: {:?}", - contract_id, &e - ); + if contract_id.is_boot() { + debug!( + "Could not use contract {} for StackerDB: {:?}", + contract_id, &e + ); + } else { + warn!( + "Could not use contract {} for StackerDB: {:?}", + contract_id, &e + ); + } return Err(e); } // evaluate the contract for these two functions let signers = Self::eval_signer_slots(chainstate, &dbconn, contract_id, &chain_tip_hash)?; - let config = Self::eval_config(chainstate, &dbconn, contract_id, &chain_tip_hash, signers)?; + let config = Self::eval_config( + chainstate, + &dbconn, + contract_id, + &chain_tip_hash, + signers, + max_neighbors, + )?; Ok(config) } } diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6cdebb69d96..d95d3ebbdb6 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -22,8 +22,9 @@ use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; @@ -157,7 +158,7 @@ fn inner_get_stackerdb_id( smart_contract: &QualifiedContractIdentifier, ) -> Result { let sql = "SELECT rowid FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; Ok(query_row(conn, sql, args)?.ok_or(net_error::NoSuchStackerDB(smart_contract.clone()))?) 
} @@ -171,7 +172,7 @@ fn inner_get_slot_metadata( ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -186,7 +187,7 @@ fn inner_get_slot_validation( let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT signer,write_time,version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -206,7 +207,7 @@ impl<'a> StackerDBTx<'a> { smart_contract_id: &QualifiedContractIdentifier, ) -> Result<(), net_error> { let qry = "DELETE FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract_id.to_string()]; + let args = params![smart_contract_id.to_string()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; Ok(()) @@ -246,7 +247,7 @@ impl<'a> StackerDBTx<'a> { let qry = "INSERT OR REPLACE INTO databases (smart_contract_id) VALUES (?1)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; stmt.execute(args)?; let stackerdb_id = self.get_stackerdb_id(smart_contract)?; @@ -258,15 +259,15 @@ impl<'a> StackerDBTx<'a> { for (principal, slot_count) in slots.iter() { test_debug!("Create StackerDB slots: ({}, {})", &principal, slot_count); for _ in 0..*slot_count { - let args: &[&dyn ToSql] = &[ - &stackerdb_id, - &principal.to_string(), - &slot_id, - &NO_VERSION, - &0, - &vec![], - &Sha512Trunc256Sum([0u8; 32]), - &MessageSignature::empty(), + let args = params![ + stackerdb_id, + principal.to_string(), + slot_id, + NO_VERSION, + 0, + vec![], + Sha512Trunc256Sum([0u8; 32]), + MessageSignature::empty(), ]; stmt.execute(args)?; @@ -286,7 +287,7 @@ impl<'a> StackerDBTx<'a> { ) -> Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args = params![stackerdb_id]; let mut stmt = self.sql_tx.prepare(&qry)?; stmt.execute(args)?; Ok(()) @@ -326,15 +327,15 @@ impl<'a> StackerDBTx<'a> { // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = &[ - &stackerdb_id, - &principal.to_string(), - &slot_id, - &NO_VERSION, - &0, - &vec![], - &Sha512Trunc256Sum([0u8; 32]), - &MessageSignature::empty(), + let args = params![ + stackerdb_id, + principal.to_string(), + slot_id, + NO_VERSION, + 0, + vec![], + Sha512Trunc256Sum([0u8; 32]), + MessageSignature::empty(), ]; stmt.execute(args)?; @@ -374,14 +375,14 @@ impl<'a> StackerDBTx<'a> { let sql = "UPDATE chunks SET version = ?1, data_hash = ?2, signature = ?3, data = ?4, write_time = ?5 WHERE stackerdb_id = ?6 AND slot_id = ?7"; let mut stmt = self.sql_tx.prepare(&sql)?; - let args: &[&dyn ToSql] = &[ - &slot_desc.slot_version, - &Sha512Trunc256Sum::from_data(chunk), - &slot_desc.signature, - &chunk, - &u64_to_sql(get_epoch_time_secs())?, - &stackerdb_id, - &slot_desc.slot_id, + let args = params![ + 
slot_desc.slot_version, + Sha512Trunc256Sum::from_data(chunk), + slot_desc.signature, + chunk, + u64_to_sql(get_epoch_time_secs())?, + stackerdb_id, + slot_desc.slot_id, ]; stmt.execute(args)?; @@ -548,7 +549,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -561,7 +562,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 GROUP BY signer"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -582,7 +583,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(&self.conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id ASC"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -606,7 +607,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -621,7 +622,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT version FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -632,7 +633,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT write_time FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -647,7 +648,7 @@ impl StackerDBs { ) -> Result>, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -680,7 +681,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT slot_id,version,signature,data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2 AND version = ?3"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id, &slot_version]; + let args = params![stackerdb_id, slot_id, slot_version]; query_row(&self.conn, &qry, args).map_err(|e| e.into()) } } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 7a1b29b2ee0..847363b2e31 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -119,6 +119,7 @@ pub mod db; pub mod sync; use std::collections::{HashMap, HashSet}; +use std::ops::Range; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; @@ 
-154,6 +155,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas +#[derive(Clone)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, @@ -161,12 +163,12 @@ pub struct StackerDBSyncResult { pub chunk_invs: HashMap, /// list of data to store pub chunks_to_store: Vec, - /// neighbors that died while syncing - dead: HashSet, - /// neighbors that misbehaved while syncing - broken: HashSet, /// neighbors that have stale views, but are otherwise online pub(crate) stale: HashSet, + /// number of connections made + pub num_connections: u64, + /// number of attempted connections + pub num_attempted_connections: u64, } /// Settings for the Stacker DB @@ -204,6 +206,22 @@ impl StackerDBConfig { pub fn num_slots(&self) -> u32 { self.signers.iter().fold(0, |acc, s| acc + s.1) } + + /// What are the slot index ranges for each signer? + /// Returns the ranges in the same ordering as `self.signers` + pub fn signer_ranges(&self) -> Vec> { + let mut slot_index = 0; + let mut result = Vec::with_capacity(self.signers.len()); + for (_, slot_count) in self.signers.iter() { + let end = slot_index + *slot_count; + result.push(Range { + start: slot_index, + end, + }); + slot_index = end; + } + result + } } /// This is the set of replicated chunks in all stacker DBs that this node subscribes to. @@ -267,6 +285,7 @@ impl StackerDBs { chainstate: &mut StacksChainState, sortdb: &SortitionDB, stacker_db_configs: HashMap, + num_neighbors: u64, ) -> Result, net_error> { let existing_contract_ids = self.get_stackerdb_contract_ids()?; let mut new_stackerdb_configs = HashMap::new(); @@ -278,25 +297,41 @@ impl StackerDBs { == boot_code_id(MINERS_NAME, chainstate.mainnet) { // .miners contract -- directly generate the config - NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip).unwrap_or_else(|e| { - warn!( - "Failed to generate .miners StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); - StackerDBConfig::noop() - }) - } else { - // attempt to load the config from the contract itself - StackerDBConfig::from_smart_contract(chainstate, &sortdb, &stackerdb_contract_id) + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map(|(config, _)| config) .unwrap_or_else(|e| { warn!( - "Failed to load StackerDB config"; + "Failed to generate .miners StackerDB config"; "contract" => %stackerdb_contract_id, "err" => ?e, ); StackerDBConfig::noop() }) + } else { + // attempt to load the config from the contract itself + StackerDBConfig::from_smart_contract( + chainstate, + &sortdb, + &stackerdb_contract_id, + num_neighbors, + ) + .unwrap_or_else(|e| { + if matches!(e, net_error::NoSuchStackerDB(_)) && stackerdb_contract_id.is_boot() + { + debug!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } else { + warn!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } + StackerDBConfig::noop() + }) }; // Create the StackerDB replica if it does not exist already if !existing_contract_ids.contains(&stackerdb_contract_id) { @@ -390,6 +425,14 @@ pub struct StackerDBSync { need_resync: bool, /// Track stale neighbors pub(crate) stale_neighbors: HashSet, + /// How many attempted connections have been made in the last pass (gets reset) + num_attempted_connections: u64, + /// How many connections have been 
made in the last pass (gets reset) + num_connections: u64, + /// Number of state machine passes + rounds: u128, + /// Round when we last pushed + push_round: u128, } impl StackerDBSyncResult { @@ -400,9 +443,9 @@ impl StackerDBSyncResult { contract_id: chunk.contract_id, chunk_invs: HashMap::new(), chunks_to_store: vec![chunk.chunk_data], - dead: HashSet::new(), - broken: HashSet::new(), stale: HashSet::new(), + num_attempted_connections: 0, + num_connections: 0, } } } @@ -433,16 +476,6 @@ impl PeerNetwork { if let Some(config) = stacker_db_configs.get(sc) { match stacker_db_sync.run(self, config) { Ok(Some(result)) => { - // clear broken nodes - for broken in result.broken.iter() { - debug!("StackerDB replica is broken: {:?}", broken); - self.deregister_and_ban_neighbor(broken); - } - // clear dead nodes - for dead in result.dead.iter() { - debug!("StackerDB replica is dead: {:?}", dead); - self.deregister_neighbor(dead); - } results.push(result); } Ok(None) => {} diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 66ad54601af..53a1f67c469 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -72,6 +72,10 @@ impl StackerDBSync { last_run_ts: 0, need_resync: false, stale_neighbors: HashSet::new(), + num_connections: 0, + num_attempted_connections: 0, + rounds: 0, + push_round: 0, }; dbsync.reset(None, config); dbsync @@ -158,7 +162,7 @@ impl StackerDBSync { } /// Reset this state machine, and get the StackerDBSyncResult with newly-obtained chunk data - /// and newly-learned information about broken and dead peers. + /// and newly-learned information about connection statistics pub fn reset( &mut self, network: Option<&PeerNetwork>, @@ -176,9 +180,9 @@ impl StackerDBSync { contract_id: self.smart_contract_id.clone(), chunk_invs, chunks_to_store: chunks, - dead: self.comms.take_dead_neighbors(), - broken: self.comms.take_broken_neighbors(), stale: std::mem::replace(&mut self.stale_neighbors, HashSet::new()), + num_connections: self.num_connections, + num_attempted_connections: self.num_attempted_connections, }; // keep all connected replicas, and replenish from config hints and the DB as needed @@ -211,6 +215,9 @@ impl StackerDBSync { self.last_run_ts = get_epoch_time_secs(); self.state = StackerDBSyncState::ConnectBegin; + self.num_connections = 0; + self.num_attempted_connections = 0; + self.rounds += 1; result } @@ -403,6 +410,16 @@ impl StackerDBSync { thread_rng().gen::() % chunk_inv.num_outbound_replicas == 0 }; + debug!( + "{:?}: Can push chunk StackerDBChunk(db={},id={},ver={}) to {}. Replicate? {}", + &network.get_local_peer(), + &self.smart_contract_id, + our_chunk.chunk_data.slot_id, + our_chunk.chunk_data.slot_version, + &naddr, + do_replicate + ); + if !do_replicate { continue; } @@ -612,7 +629,7 @@ impl StackerDBSync { /// Returns Err(..) 
on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.replicas.len() == 0 { - // find some from the peer Db + // find some from the peer DB let replicas = self.find_qualified_replicas(network)?; self.replicas = replicas; } @@ -628,6 +645,15 @@ impl StackerDBSync { let naddrs = mem::replace(&mut self.replicas, HashSet::new()); for naddr in naddrs.into_iter() { + if self.comms.is_neighbor_connecting(network, &naddr) { + debug!( + "{:?}: connect_begin: already connecting to StackerDB peer {:?}", + network.get_local_peer(), + &naddr + ); + self.replicas.insert(naddr); + continue; + } if self.comms.has_neighbor_session(network, &naddr) { debug!( "{:?}: connect_begin: already connected to StackerDB peer {:?}", @@ -651,13 +677,16 @@ impl StackerDBSync { network.get_local_peer(), &naddr ); + self.num_attempted_connections += 1; + self.num_connections += 1; } Ok(false) => { // need to retry self.replicas.insert(naddr); + self.num_attempted_connections += 1; } Err(_e) => { - info!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); } } } @@ -718,7 +747,7 @@ impl StackerDBSync { ); // disconnect - self.comms.add_dead(network, &naddr); + self.connected_replicas.remove(&naddr); continue; } @@ -984,9 +1013,11 @@ impl StackerDBSync { /// Returns true if there are no more chunks to push. /// Returns false if there are pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_push_priorities.len() == 0 { + if self.chunk_push_priorities.len() == 0 && self.push_round != self.rounds { + // only do this once per round let priorities = self.make_chunk_push_schedule(&network)?; self.chunk_push_priorities = priorities; + self.push_round = self.rounds; } if self.chunk_push_priorities.len() == 0 { // done @@ -1001,8 +1032,6 @@ impl StackerDBSync { self.chunk_push_priorities.len() ); - let mut pushed = 0; - // fill up our comms with $capacity requests for _i in 0..self.request_capacity { if self.comms.count_inflight() >= self.request_capacity { @@ -1010,15 +1039,13 @@ impl StackerDBSync { } let chunk_push = self.chunk_push_priorities[cur_priority].0.clone(); + // try the first neighbor in the chunk_push_priorities list let selected_neighbor_opt = self.chunk_push_priorities[cur_priority] .1 - .iter() - .enumerate() - .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); + .first() + .map(|neighbor| (0, neighbor)); - let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt { - x - } else { + let Some((idx, selected_neighbor)) = selected_neighbor_opt else { debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to", &network.get_local_peer(), &self.smart_contract_id, @@ -1056,8 +1083,6 @@ impl StackerDBSync { continue; } - pushed += 1; - // record what we just sent self.chunk_push_receipts .insert(selected_neighbor.clone(), (slot_id, slot_version)); @@ -1068,11 +1093,14 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); } - if pushed == 0 { - return Err(net_error::PeerNotConnected); - } self.next_chunk_push_priority = cur_priority; - Ok(self.chunk_push_priorities.len() == 0) + Ok(self + .chunk_push_priorities + .iter() + .fold(0usize, |acc, (_chunk, num_naddrs)| { + acc.saturating_add(num_naddrs.len()) + }) + == 0) } /// Collect push-chunk replies from neighbors. 
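Two behaviors in the `pushchunks_begin` rewrite above are worth spelling out: the push schedule is rebuilt at most once per state-machine round (the `push_round != rounds` guard), and the pass now reports completion only once every chunk's candidate-neighbor list has been drained, instead of returning `PeerNotConnected` whenever a single pass pushed nothing. A reduced model of that termination check follows; the tuple type here is an illustrative stand-in for the real schedule, which pairs a chunk with the neighbor addresses it may still be pushed to.

/// Reduced model of the chunk-push schedule: each entry pairs a chunk ID with
/// the neighbors it may still be pushed to. This mirrors the fold at the end
/// of pushchunks_begin: the pass is done only when every list is empty.
fn push_schedule_done(schedule: &[(u32, Vec<String>)]) -> bool {
    schedule
        .iter()
        .fold(0usize, |acc, (_chunk_id, naddrs)| {
            acc.saturating_add(naddrs.len())
        })
        == 0
}

fn main() {
    let mut schedule = vec![
        (0u32, vec!["peer-a".to_string(), "peer-b".to_string()]),
        (1u32, vec!["peer-c".to_string()]),
    ];
    assert!(!push_schedule_done(&schedule));

    // Draining every candidate list is what lets the state machine move on,
    // even if some individual pass pushed nothing.
    for (_, naddrs) in schedule.iter_mut() {
        naddrs.clear();
    }
    assert!(push_schedule_done(&schedule));
}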
@@ -1122,7 +1150,14 @@ impl StackerDBSync { } } - self.comms.count_inflight() == 0 + let inflight = self.comms.count_inflight(); + debug!( + "{:?}: inflight messages for {}: {:?}", + network.get_local_peer(), + &self.smart_contract_id, + inflight + ); + inflight == 0 } /// Recalculate the download schedule based on chunkinvs received on push @@ -1173,8 +1208,9 @@ impl StackerDBSync { loop { debug!( - "{:?}: stacker DB sync state is {:?}", + "{:?}: stacker DB sync state for {} is {:?}", network.get_local_peer(), + &self.smart_contract_id, &self.state ); let mut blocked = true; diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index 9600ed79a8e..a075d7b974b 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -133,7 +133,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -152,7 +152,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -174,7 +174,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -193,7 +193,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -212,7 +212,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -234,7 +234,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -256,7 +256,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -278,7 +278,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -300,7 +300,7 @@ fn 
test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -322,7 +322,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -344,7 +344,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -366,7 +366,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -388,7 +388,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u18446744073709551617, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -432,7 +432,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u1, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -454,7 +454,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u65537, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -462,6 +462,44 @@ fn test_valid_and_invalid_stackerdb_configs() { "#, None, ), + ( + // valid, but private IP and absurd max neighbors are both handled + r#" + (define-public (stackerdb-get-signer-slots) + (ok (list { signer: 'ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B, num-slots: u3 }))) + + (define-public (stackerdb-get-config) + (ok { + chunk-size: u123, + write-freq: u4, + max-writes: u56, + max-neighbors: u1024, + hint-replicas: (list + { + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u192 u168 u0 u1), + port: u8901, + public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 + }) + })) + "#, + Some(StackerDBConfig { + chunk_size: 123, + signers: vec![( + StacksAddress { + version: 26, + bytes: Hash160::from_hex("b4fdae98b64b9cd6c9436f3b965558966afe890b") + .unwrap(), + }, + 3, + )], + write_freq: 4, + max_writes: 56, + // no neighbors + hint_replicas: vec![], + // max neighbors is truncated + max_neighbors: 32, + }), + ), ]; for (i, (code, _result)) in testcases.iter().enumerate() { @@ -490,7 +528,7 @@ fn test_valid_and_invalid_stackerdb_configs() { ContractName::try_from(format!("test-{}", i)).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { - match 
StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id) { + match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32) { Ok(config) => { let expected = result .clone() diff --git a/stackslib/src/net/stackerdb/tests/sync.rs index eeb2f5aae52..d1ac5e58bed 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -32,11 +32,12 @@ use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::net::p2p::PeerNetwork; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; use crate::net::stackerdb::{StackerDBConfig, StackerDBs}; use crate::net::test::{TestPeer, TestPeerConfig}; -use crate::net::{Error as net_error, StackerDBChunkData}; +use crate::net::{Error as net_error, NetworkResult, StackerDBChunkData}; use crate::util_lib::test::with_timeout; const BASE_PORT: u16 = 33000; @@ -179,6 +180,25 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { ret } +fn check_sync_results(network_sync: &NetworkResult) { + for res in network_sync.stacker_db_sync_results.iter() { + assert!(res.num_connections <= res.num_attempted_connections); + } +} + +fn test_reconnect(network: &mut PeerNetwork) { + let mut stacker_db_syncs = network + .stacker_db_syncs + .take() + .expect("FATAL: did not replace stacker dbs"); + + for (_sc, stacker_db_sync) in stacker_db_syncs.iter_mut() { + stacker_db_sync.connect_begin(network).unwrap(); + } + + network.stacker_db_syncs = Some(stacker_db_syncs); +} + #[test] fn test_stackerdb_replica_2_neighbors_1_chunk() { with_timeout(600, || { @@ -234,7 +254,12 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); + // test that re-connects are limited to 1 per host + test_reconnect(&mut peer_1.network); + test_reconnect(&mut peer_2.network); + if let Ok(mut res) = res_1 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -252,6 +277,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -354,6 +380,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); if sync_res.stale.len() > 0 { @@ -377,6 +404,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); if sync_res.stale.len() > 0 { @@ -428,6 +456,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -445,6 +474,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -550,6 +580,7 @@ fn
inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port let res_2 = peer_2.step_with_ibd(false); if let Ok(mut res) = res_1 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_1.network.stackerdbs, &peer_1_db_configs, @@ -567,6 +598,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port } if let Ok(mut res) = res_2 { + check_sync_results(&res); Relayer::process_stacker_db_chunks( &mut peer_2.network.stackerdbs, &peer_2_db_configs, @@ -686,7 +718,9 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for i in 0..num_peers { peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); + if let Ok(mut res) = res { + check_sync_results(&res); let rc_consensus_hash = peers[i].network.get_chain_view().rc_consensus_hash.clone(); Relayer::process_stacker_db_chunks( diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5e9ea0daf2b..2d53c89f9a0 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -329,10 +329,12 @@ where let mut result = peer.step_dns(&mut dns_clients[i]).unwrap(); let lp = peer.network.local_peer.clone(); + let burnchain = peer.network.burnchain.clone(); peer.with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -624,7 +626,7 @@ fn make_contract_call_transaction( let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let cur_nonce = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &chain_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_nonce(&spending_account.origin_address().unwrap().into()) @@ -807,7 +809,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, stack_tx], vec![mblock_tx], ) @@ -1424,7 +1426,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 73472c9c565..9de9fb087bf 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::sync::mpsc::sync_channel; use std::thread; @@ -30,6 +30,7 @@ use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFProof; use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::SortitionHandle; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -90,9 +91,9 @@ impl NakamotoDownloadStateMachine { fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); let private_key = StacksPrivateKey::new(); - let mut test_signers = TestSigners::default(); + let mut test_signers = TestSigners::new(vec![]); - let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let reward_set = test_signers.synthesize_reward_set(); let tenure_start_header = NakamotoBlockHeader { version: 1, @@ -102,9 +103,10 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let tenure_change_payload = TenureChangePayload { @@ -116,7 +118,6 @@ fn test_nakamoto_tenure_downloader() { cause: TenureChangeCause::BlockFound, pubkey_hash: Hash160([0x02; 20]), }; - use stacks_common::types::net::PeerAddress; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -170,9 +171,10 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: last_block.header.block_id(), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let mut block = NakamotoBlock { @@ -191,9 +193,10 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: blocks.last().unwrap().header.block_id(), tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), + timestamp: 9, miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), }; let next_tenure_change_payload = TenureChangePayload { @@ -231,8 +234,8 @@ fn test_nakamoto_tenure_downloader() { tenure_start_block.header.block_id(), next_tenure_start_block.header.block_id(), naddr.clone(), - aggregate_public_key.clone(), - aggregate_public_key.clone(), + reward_set.clone(), + reward_set.clone(), ); // must be first block @@ -352,7 +355,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let naddr = NeighborAddress { addrbytes: PeerAddress([0xff; 16]), @@ -361,21 +364,33 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { }; peer.refresh_burnchain_view(); - let tip_block_id = 
StacksBlockId::new(&peer.network.stacks_tip.0, &peer.network.stacks_tip.1); + let tip_block_id = peer.network.stacks_tip.block_id(); - let tip_ch = peer.network.stacks_tip.0.clone(); - let parent_tip_ch = peer.network.parent_stacks_tip.0.clone(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let parent_tip_ch = peer.network.parent_stacks_tip.consensus_hash.clone(); + let current_reward_sets = peer.network.current_reward_sets.clone(); + let last_block_in_confirmed_tenure = NakamotoChainState::get_highest_block_header_in_tenure( + &mut peer.chainstate().index_conn(), + &tip_block_id, + &parent_tip_ch, + ) + .unwrap() + .unwrap(); + + // NOTE: we have to account for malleablized blocks! let unconfirmed_tenure = peer .chainstate() .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&tip_ch) + .get_all_blocks_in_tenure(&tip_ch, &tip_block_id) .unwrap(); let last_confirmed_tenure = peer .chainstate() .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&parent_tip_ch) + .get_all_blocks_in_tenure( + &parent_tip_ch, + &last_block_in_confirmed_tenure.index_block_hash(), + ) .unwrap(); let parent_parent_header = NakamotoChainState::get_block_header_nakamoto( @@ -390,7 +405,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .unwrap() .unwrap(); let parent_parent_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( - peer.chainstate().db(), + &mut peer.chainstate().index_conn(), + &tip_block_id, &parent_parent_header.consensus_hash, ) .unwrap() @@ -420,48 +436,117 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .expect("FATAL: burnchain tip before system start"); let highest_confirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + tenure_id_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), winning_block_id: parent_parent_start_header.index_block_hash(), processed: false, burn_height: peer.network.burnchain_tip.block_height - 1, }; let unconfirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.stacks_tip.0.clone(), + tenure_id_consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), winning_block_id: last_confirmed_tenure .first() .as_ref() .unwrap() .header - .parent_block_id - .clone(), + .block_id(), processed: false, burn_height: peer.network.burnchain_tip.block_height, }; + // we can make unconfirmed tenure downloaders + { + let mut empty_schedule = VecDeque::new(); + let mut full_schedule = { + let mut sched = VecDeque::new(); + sched.push_back(naddr.clone()); + sched + }; + let mut empty_downloaders = HashMap::new(); + let mut full_downloaders = { + let mut dl = HashMap::new(); + let utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); + dl.insert(naddr.clone(), utd); + dl + }; + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut empty_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!(full_schedule.len(), 1); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut empty_downloaders, + None + ), + 1 + ); + 
assert_eq!(full_schedule.len(), 0); + assert_eq!(empty_downloaders.len(), 1); + } + // we've processed the tip already, so we transition straight to the Done state { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -472,7 +557,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -486,12 +571,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -507,26 +587,40 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: 
peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -537,7 +631,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -550,7 +644,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( utd.state, NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() + tenure_tip.tip_block_id.clone(), ) ); assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); @@ -575,12 +669,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -596,26 +685,40 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -626,7 +729,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -639,7 +742,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( utd.state, NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() + 
tenure_tip.tip_block_id.clone(), ) ); assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); @@ -666,12 +769,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -684,26 +782,40 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // serve all of the unconfirmed blocks in one shot. { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -714,7 +826,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -734,12 +846,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -751,26 +858,40 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // bad block signature { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + 
.cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -781,7 +902,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -796,6 +917,83 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .try_accept_unconfirmed_tenure_blocks(vec![bad_block]) .is_err()); } + + // Does not consume blocks beyond the highest processed block ID + { + let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .reward_cycle_info + .known_selected_anchor_block_owned() + .unwrap(), + ); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); + + let tenure_tip = RPCGetTenureInfo { + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), + tenure_start_block_id: peer.network.tenure_start_block_id.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), + parent_tenure_start_block_id: StacksBlockId::new( + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, + ), + tip_block_id: StacksBlockId::new( + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, + ), + tip_height: peer.network.stacks_tip.height, + reward_cycle: tip_rc, + }; + + let sortdb = peer.sortdb.take().unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + utd.try_accept_tenure_info( + &sortdb, + &sort_tip, + peer.chainstate(), + tenure_tip.clone(), + ¤t_reward_sets, + ) + .unwrap(); + + peer.sortdb = Some(sortdb); + + assert!(utd.unconfirmed_tenure_start_block.is_some()); + + utd.highest_processed_block_id = Some(unconfirmed_tenure[1].header.block_id()); + let res = utd + .try_accept_unconfirmed_tenure_blocks( + unconfirmed_tenure.clone().into_iter().rev().collect(), + ) + .unwrap(); + assert_eq!(res.unwrap().as_slice(), &unconfirmed_tenure[1..]); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); + + // we can request the highest-complete tenure + assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); + + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); + assert_eq!( + ntd.state, + NakamotoTenureDownloadState::GetTenureStartBlock( + 
unconfirmed_wanted_tenure.winning_block_id.clone() + ) + ); + } } #[test] @@ -1090,8 +1288,9 @@ fn test_make_tenure_downloaders() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let test_signers = TestSigners::new(vec![]); + let current_reward_sets = peer.network.current_reward_sets.clone(); + let stacks_tip = peer.network.stacks_tip.block_id(); // test load_wanted_tenures() { @@ -1242,11 +1441,13 @@ fn test_make_tenure_downloaders() { ) .unwrap(); + let nakamoto_tip = peer.network.stacks_tip.block_id(); let chainstate = peer.chainstate(); NakamotoDownloadStateMachine::inner_update_processed_wanted_tenures( nakamoto_start, &mut wanted_tenures, chainstate, + &nakamoto_tip, ) .unwrap(); @@ -1273,11 +1474,13 @@ fn test_make_tenure_downloaders() { // but the resulting map is keyed by block ID (and we don't have the first block ID) let wanted_tenures_with_blocks = wanted_tenures[1..].to_vec(); + let nakamoto_tip = peer.network.stacks_tip.block_id(); let chainstate = peer.chainstate(); let mut tenure_start_blocks = HashMap::new(); NakamotoDownloadStateMachine::load_tenure_start_blocks( &wanted_tenures, chainstate, + &nakamoto_tip, &mut tenure_start_blocks, ) .unwrap(); @@ -1559,7 +1762,8 @@ fn test_make_tenure_downloaders() { let chainstate = peer.chainstate(); let start_end = available_tenures.get(&wt.tenure_id_consensus_hash).unwrap(); let hdr = NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), + &mut chainstate.index_conn(), + &stacks_tip, &wt.tenure_id_consensus_hash, ) .unwrap() @@ -1794,7 +1998,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 6, - &agg_pubkeys, + ¤t_reward_sets, ); // made all 6 downloaders @@ -1832,7 +2036,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 12, - &agg_pubkeys, + ¤t_reward_sets, ); // only made 4 downloaders got created @@ -1891,7 +2095,12 @@ fn test_nakamoto_download_run_2_peers() { let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); - + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); assert_eq!(tip.block_height, 81); // make a neighbor from this peer @@ -1920,8 +2129,9 @@ fn test_nakamoto_download_run_2_peers() { let mut all_block_headers: HashMap = HashMap::new(); for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash( - peer.chainstate().db(), + if let Some(header) = NakamotoChainState::get_tenure_start_block_header( + &mut peer.chainstate().index_conn(), + &nakamoto_tip, &sn.consensus_hash, ) .unwrap() @@ -1954,6 +2164,7 @@ fn test_nakamoto_download_run_2_peers() { sn.block_height, &sn.burn_header_hash, ops.len() as u64, + false, ); TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); } @@ -2077,6 +2288,12 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); assert_eq!(tip.block_height, 51); @@ -2106,8 +2323,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let mut all_block_headers: HashMap = HashMap::new(); for sn in 
all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash( - peer.chainstate().db(), + if let Some(header) = NakamotoChainState::get_tenure_start_block_header( + &mut peer.chainstate().index_conn(), + &nakamoto_tip, &sn.consensus_hash, ) .unwrap() @@ -2140,6 +2358,7 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { sn.block_height, &sn.burn_header_hash, ops.len() as u64, + false, ); TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); } diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 0a9bc4f7f15..1837d8e1c47 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -21,6 +21,7 @@ use std::str; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; use stacks_common::util::chunked_encoding::{ HttpChunkedTransferWriter, HttpChunkedTransferWriterState, }; @@ -442,7 +443,7 @@ fn test_http_response_type_codec() { "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", ) .unwrap(); - let test_block_info = make_codec_test_block(5); + let test_block_info = make_codec_test_block(5, StacksEpochId::latest()); let test_microblock_info = make_sample_microblock_stream(&privk, &test_block_info.block_hash()); let mut test_block_info_bytes = vec![]; diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 3d349e76795..e31b6dc593a 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1241,6 +1241,89 @@ fn test_sync_inv_diagnose_nack() { ); } +#[test] +fn test_inv_sync_start_reward_cycle() { + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + peer_1_config.connection_opts.inv_reward_cycles = 0; + + let mut peer_1 = TestPeer::new(peer_1_config); + + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); + peer_1.next_burnchain_block(burn_ops.clone()); + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let _ = peer_1.step(); + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 7); + + peer_1.network.connection_opts.inv_reward_cycles = 1; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 7); + + peer_1.network.connection_opts.inv_reward_cycles = 2; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 6); + + peer_1.network.connection_opts.inv_reward_cycles = 3; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 5); + + peer_1.network.connection_opts.inv_reward_cycles = 300; + + let block_scan_start = peer_1 + .network + .get_block_scan_start(peer_1.sortdb.as_ref().unwrap()); + assert_eq!(block_scan_start, 0); +} + +#[test] +fn test_inv_sync_check_peer_epoch2x_synced() { + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + peer_1_config.connection_opts.inv_reward_cycles = 0; + + let mut peer_1 = TestPeer::new(peer_1_config); + + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + for i in 
0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); + peer_1.next_burnchain_block(burn_ops.clone()); + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let _ = peer_1.step(); + let tip_rc = peer_1 + .network + .burnchain + .block_height_to_reward_cycle(peer_1.network.burnchain_tip.block_height) + .unwrap(); + assert!(tip_rc > 0); + + let pox_rc = peer_1.network.pox_id.num_inventory_reward_cycles() as u64; + + assert!(peer_1.network.check_peer_epoch2x_synced(true, tip_rc)); + assert!(peer_1.network.check_peer_epoch2x_synced(true, tip_rc + 1)); + assert!(!peer_1.network.check_peer_epoch2x_synced(true, tip_rc - 1)); + + assert!(peer_1.network.check_peer_epoch2x_synced(false, pox_rc)); + assert!(peer_1.network.check_peer_epoch2x_synced(false, pox_rc + 1)); + assert!(!peer_1.network.check_peer_epoch2x_synced(false, pox_rc - 1)); +} + #[test] #[ignore] fn test_sync_inv_2_peers_plain() { @@ -1248,12 +1331,15 @@ fn test_sync_inv_2_peers_plain() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { let sn = @@ -1422,12 +1508,15 @@ fn test_sync_inv_2_peers_stale() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { let sn = @@ -1525,14 +1614,17 @@ fn test_sync_inv_2_peers_unstable() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; let first_stacks_block_height = { @@ -1559,7 +1651,7 @@ fn test_sync_inv_2_peers_unstable() { } else { // peer 1 diverges test_debug!("Peer 1 diverges at {}", i + first_stacks_block_height); - peer_1.next_burnchain_block(vec![]); + 
peer_1.next_burnchain_block_diverge(vec![burn_ops[0].clone()]); } } @@ -1734,8 +1826,8 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_1_config.connection_opts.inv_reward_cycles = 10; + peer_2_config.connection_opts.inv_reward_cycles = 10; let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; assert_eq!(reward_cycle_length, 5); @@ -1743,6 +1835,9 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let num_blocks = (GETPOXINV_MAX_BITLEN * 3) as u64; let first_stacks_block_height = { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index ca5c0818db4..fd9f1dcc1f6 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -20,6 +20,7 @@ use std::sync::mpsc::sync_channel; use std::thread; use std::thread::JoinHandle; +use clarity::vm::types::PrincipalData; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; @@ -34,6 +35,7 @@ use crate::chainstate::nakamoto::coordinator::tests::{ simple_nakamoto_coordinator_10_tenures_10_sortitions, simple_nakamoto_coordinator_2_tenures_3_sortitions, }; +use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ @@ -170,6 +172,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = peer.network.stacks_tip.block_id(); let mut inv_generator = InvGenerator::new(); @@ -180,7 +183,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // check the reward cycles for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64) + .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -231,6 +234,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = peer.network.stacks_tip.block_id(); let mut inv_generator = InvGenerator::new(); @@ -240,7 +244,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64) + .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -283,6 +287,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = 
peer.network.stacks_tip.block_id();

     let mut inv_generator = InvGenerator::new();

@@ -292,7 +297,7 @@
     for (rc, inv) in reward_cycle_invs.into_iter().enumerate() {
         let bitvec = inv_generator
-            .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64)
+            .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64)
             .unwrap();
         debug!("At reward cycle {}: {:?}", rc, &bitvec);

@@ -332,6 +337,49 @@ pub fn make_nakamoto_peers_from_invs<'a>(
     prepare_len: u32,
     bitvecs: Vec<Vec<bool>>,
     num_peers: usize,
+) -> (TestPeer<'a>, Vec<TestPeer<'a>>) {
+    inner_make_nakamoto_peers_from_invs(
+        test_name,
+        observer,
+        rc_len,
+        prepare_len,
+        bitvecs,
+        num_peers,
+        vec![],
+    )
+}
+
+/// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into
+/// the peers here. However, the borrow checker appears to require it.
+pub fn make_nakamoto_peers_from_invs_and_balances<'a>(
+    test_name: &str,
+    observer: &'a TestEventObserver,
+    rc_len: u32,
+    prepare_len: u32,
+    bitvecs: Vec<Vec<bool>>,
+    num_peers: usize,
+    initial_balances: Vec<(PrincipalData, u64)>,
+) -> (TestPeer<'a>, Vec<TestPeer<'a>>) {
+    inner_make_nakamoto_peers_from_invs(
+        test_name,
+        observer,
+        rc_len,
+        prepare_len,
+        bitvecs,
+        num_peers,
+        initial_balances,
+    )
+}
+
+/// Make peers from inventories and balances
+fn inner_make_nakamoto_peers_from_invs<'a>(
+    test_name: &str,
+    observer: &'a TestEventObserver,
+    rc_len: u32,
+    prepare_len: u32,
+    bitvecs: Vec<Vec<bool>>,
+    num_peers: usize,
+    mut initial_balances: Vec<(PrincipalData, u64)>,
 ) -> (TestPeer<'a>, Vec<TestPeer<'a>>) {
     for bitvec in bitvecs.iter() {
         assert_eq!(bitvec.len() as u32, rc_len);
@@ -406,11 +454,19 @@
         }
     }

+    // make malleablized blocks
+    let (test_signers, test_stackers) = TestStacker::multi_signing_set(&[
+        0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+    ]);
+
+    initial_balances.push((addr.into(), 1_000_000));
     let plan = NakamotoBootPlan::new(test_name)
         .with_private_key(private_key)
         .with_pox_constants(rc_len, prepare_len)
-        .with_initial_balances(vec![(addr.into(), 1_000_000)])
-        .with_extra_peers(num_peers);
+        .with_initial_balances(initial_balances)
+        .with_extra_peers(num_peers)
+        .with_test_signers(test_signers)
+        .with_test_stackers(test_stackers);

     let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(observer));
     (peer, other_peers)
diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs
new file mode 100644
index 00000000000..7a44a56788d
--- /dev/null
+++ b/stackslib/src/net/tests/mempool/mod.rs
@@ -0,0 +1,1302 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
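Every test in this new module drives two peers with the same convergence loop: step each peer's network state machine, feed the results through the relayer, and re-count each mempool until both sides hold the expected transactions. Below is a condensed sketch of that loop, assuming the `TestPeer` and `MemPoolDB` APIs used throughout this diff; the helper name `step_and_count_mempool` is illustrative only and is not part of this module (each test inlines this logic).

    // Sketch only: one pass of the sync loop that each test below repeats.
    // `step_and_count_mempool` is a hypothetical name, not part of this module.
    fn step_and_count_mempool(peer: &mut TestPeer) -> usize {
        // Drive the p2p state machine one step (not in initial block download).
        if let Ok(mut result) = peer.step_with_ibd(false) {
            let lp = peer.network.local_peer.clone();
            let burnchain = peer.network.burnchain.clone();
            peer.with_db_state(|sortdb, chainstate, relayer, mempool| {
                // Store any transactions fetched by mempool sync.
                relayer.process_network_result(
                    &lp, &mut result, &burnchain, sortdb, chainstate, mempool, false, None, None,
                )
            })
            .unwrap();
        }
        // Count what the mempool now holds so callers can test for convergence.
        let mp = peer.mempool.take().unwrap();
        let count = MemPoolDB::get_all_txs(mp.conn()).unwrap().len();
        peer.mempool.replace(mp);
        count
    }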
+ +use std::cell::RefCell; +use std::{thread, time}; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::MAX_CALL_STACK_DEPTH; +use rand; +use rand::RngCore; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use stacks_common::util::{log, sleep_ms}; + +use super::*; +use crate::burnchains::burnchain::*; +use crate::burnchains::*; +use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; +use crate::chainstate::stacks::test::*; +use crate::chainstate::stacks::*; +use crate::core::StacksEpochExtension; +use crate::net::atlas::*; +use crate::net::codec::*; +use crate::net::db::*; +use crate::net::test::*; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs_and_balances; +use crate::net::tests::relay::epoch2x::make_contract_tx; +use crate::net::*; +use crate::util_lib::test::*; + +#[test] +fn test_mempool_sync_2_peers() { + // peer 1 gets some transactions; verify peer 2 gets the recent ones and not the old + // ones + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 10; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..(num_blocks / 2) { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // old transactions + let num_txs = 10; + let mut old_txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = 
StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + old_txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + (num_blocks / 2) as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // keep mining to make these txs old + for i in (num_blocks / 2)..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(1); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + 
relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + // peer 2 has all the recent txs + // peer 2 has none of the old ones + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + assert!(old_txs.get(&tx.tx.txid()).is_none()); + } +} + +#[test] +fn test_mempool_sync_2_peers_paginated() { + // peer 1 gets some transactions; verify peer 2 gets them all + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + 
anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + } +} + +#[test] +fn test_mempool_sync_2_peers_blacklisted() { + // peer 1 gets some transactions; peer 2 blacklists some of them; + // verify peer 2 gets only the non-blacklisted ones. 
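The blacklist plumbing this test exercises is configured partway through the body below; in miniature, and using the `MemPoolDB` fields and helpers that appear verbatim later in this diff, the setup is:

    // Sketch of the blacklist setup performed below (names match the test body).
    let mut peer_2_mempool = peer_2.mempool.take().unwrap();
    // Make blacklist entries effectively permanent for the test's duration.
    peer_2_mempool.blacklist_timeout = u64::MAX / 2;
    let mempool_tx = peer_2_mempool.tx_begin().unwrap();
    // `peer_2_blacklist` holds the even-numbered txids chosen during setup.
    MemPoolDB::inner_blacklist_txs(&mempool_tx, &peer_2_blacklist, get_epoch_time_secs()).unwrap();
    mempool_tx.commit().unwrap();
    peer_2.mempool = Some(peer_2_mempool);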
+ let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + let mut peer_2_blacklist = vec![]; + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + + if i % 2 == 0 { + // peer 2 blacklists even-numbered txs + peer_2_blacklist.push(txid); + } + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // peer 2 blacklists them all + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + + // blacklisted txs never time out + 
peer_2_mempool.blacklist_timeout = u64::MAX / 2; + + let mempool_tx = peer_2_mempool.tx_begin().unwrap(); + MemPoolDB::inner_blacklist_txs(&mempool_tx, &peer_2_blacklist, get_epoch_time_secs()).unwrap(); + mempool_tx.commit().unwrap(); + + peer_2.mempool = Some(peer_2_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + assert!(!peer_2_blacklist.contains(&tx.tx.txid())); + } +} + +/// Make sure mempool sync never stores problematic transactions +#[test] +fn test_mempool_sync_2_peers_problematic() { + // peer 1 gets some transactions; peer 2 blacklists them all due to being invalid. + // verify peer 2 stores nothing. 
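A "problematic" transaction here is one whose Clarity body blows past the parser's AST stack-depth limit, so peer 2's admission checks reject it outright. The construction used in the test body below nests tuple syntax just past the allowed depth:

    // Build a contract body one nesting level beyond what the AST
    // stack-depth checker accepts (same construction as the test below).
    let exceeds = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64);
    let body = format!(
        "{}u1 {}",
        "{ a : ".repeat(exceeds as usize), // open `exceeds` nested tuples
        "} ".repeat(exceeds as usize),     // and close them all again
    );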
+ let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 128; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + + let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + + let tx = make_contract_tx( + &pk, + 0, + (tx_exceeds_body.len() * 100) as u64, + "test-exceeds", + &tx_exceeds_body, + ); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // blacklisted txs never time out + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + peer_2_mempool.blacklist_timeout = u64::MAX / 2; + peer_2.mempool = Some(peer_2_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + + 
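Since peer 2 must refuse to store any of these transactions, the loop below cannot detect convergence by counting peer 2's mempool rows. It instead watches the running counter kept by peer 2's mempool sync state machine, read as follows:

    // Peer 2 stores nothing, so track how many transactions its mempool
    // sync state machine has processed rather than how many it kept.
    let synced_so_far = peer_2
        .network
        .mempool_sync
        .as_ref()
        .unwrap()
        .mempool_sync_txs;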
while peer_1_mempool_txs < num_txs + || peer_2 + .network + .mempool_sync + .as_ref() + .unwrap() + .mempool_sync_txs + < (num_txs as u64) + { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, + peer_2 + .network + .mempool_sync + .as_ref() + .unwrap() + .mempool_sync_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + assert_eq!(peer_2_mempool_txs.len(), 128); +} + +/// Verify that when transactions get stored into the mempool, they are always keyed to the +/// tenure-start block and its coinbase height +#[test] +pub fn test_mempool_storage_nakamoto() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); + let mut peer = boot_nakamoto( + function_name!(), + vec![(addr.into(), 100_000_000)], + &mut test_signers, + &test_stackers, + None, + ); + + let mut total_blocks = 0; + let mut all_txs = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // duplicate handles to the chainstates so we can submit txs + let mut mempool = + MemPoolDB::open_test(false, peer.config.network_id, &peer.chainstate_path).unwrap(); + let (mut chainstate, _) = peer.chainstate().reopen().unwrap(); + let sortdb = peer.sortdb().reopen().unwrap(); + + for i in 0..10 { + debug!("Tenure {}", i); + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + let num_blocks: usize = (thread_rng().gen::() % 10) + 1; + + let block_height = peer.get_burn_block_height(); + + // do a stx transfer in each block to a given 
recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let mempool_txs = RefCell::new(vec![]); + let blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + |miner, chainstate, sortdb, blocks_so_far| { + let mut txs = vec![]; + if blocks_so_far.len() < num_blocks { + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 200, + 200, + &recipient_addr, + ); + txs.push(stx_transfer.clone()); + (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); + all_txs.push(stx_transfer.clone()); + } + txs + }, + |_| { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash) + .unwrap() + .unwrap(); + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) + .unwrap() + .unwrap(); + + // submit each transaction to the mempool + for mempool_tx in (*mempool_txs.borrow()).as_slice() { + mempool + .submit( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &mempool_tx, + None, + &epoch.block_limit, + &epoch.epoch_id, + ) + .unwrap(); + } + + (*mempool_txs.borrow_mut()).clear(); + true + }, + ); + + total_blocks += num_blocks; + } + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + // each transaction is present, and is paired with a tenure-start block + let mut recovered_txs = HashSet::new(); + let tip_block_id = tip.index_block_hash(); + let mut tenure_id = tip.consensus_hash; + loop { + let tenure_start = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + &tenure_id, + ) + .unwrap() + .unwrap(); + + let all_txdata = MemPoolDB::get_txs_after( + mempool.conn(), + &tenure_start.consensus_hash, + &tenure_start.anchored_header.block_hash(), + 0, + u64::try_from(i64::MAX - 1).unwrap(), + ) + .unwrap(); + for txdata in all_txdata { + recovered_txs.insert(txdata.tx.txid()); + } + + let Some(parent_tenure_id) = + NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( + &mut chainstate.index_conn(), + &tip_block_id, + &tenure_id, + ) + .unwrap() + else { + break; + }; + tenure_id = parent_tenure_id; + } + + let all_txs_set: HashSet<_> = all_txs.into_iter().map(|tx| tx.txid()).collect(); + assert_eq!(all_txs_set, recovered_txs); +} + +#[test] +fn test_mempool_sync_2_peers_nakamoto_paginated() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + let (mut peer_1, mut other_peers) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 1, + initial_balances, + ); + let mut peer_2 = other_peers.pop().unwrap(); + + let nakamoto_start = + 
NakamotoBootPlan::nakamoto_first_tenure_height(&peer_1.config.burnchain.pox_constants); + + let tip = { + let sort_db = peer_1.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + let total_rcs = peer_1 + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // run peer and other_peer until they connect + loop { + let _ = peer_1.step_with_ibd(false); + let _ = peer_2.step_with_ibd(false); + + let event_ids: Vec = peer_1 + .network + .iter_peer_event_ids() + .map(|e_id| *e_id) + .collect(); + let other_event_ids: Vec = peer_2 + .network + .iter_peer_event_ids() + .map(|e_id| *e_id) + .collect(); + + if event_ids.len() > 0 && other_event_ids.len() > 0 { + break; + } + } + + debug!("Peers are connected"); + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // find coinbase height + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut peer_1.chainstate().index_conn(), + &StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh), + ) + .unwrap() + .unwrap(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + coinbase_height, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + 
}) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + } +} diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 82e1b8b8146..05477bb08c0 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -17,7 +17,11 @@ pub mod download; pub mod httpcore; pub mod inv; +pub mod mempool; pub mod neighbors; +pub mod relay; + +use std::collections::HashSet; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; @@ -28,7 +32,7 @@ use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_H use stacks_common::types::chainstate::{ StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; @@ -36,9 +40,8 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::coordinator::tests::p2pkh_from; -use crate::chainstate::nakamoto::coordinator::tests::{ - boot_nakamoto, make_all_signers_vote_for_aggregate_key, -}; +use crate::chainstate::nakamoto::coordinator::tests::boot_nakamoto; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; @@ -90,13 +93,13 @@ pub struct NakamotoBootPlan { impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { - let test_signers = TestSigners::default(); + let (test_signers, test_stackers) = TestStacker::common_signing_set(); Self { test_name: test_name.to_string(), pox_constants: TestPeerConfig::default().burnchain.pox_constants, private_key: StacksPrivateKey::from_seed(&[2]), initial_balances: vec![], - test_stackers: TestStacker::common_signing_set(&test_signers), + test_stackers, test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, @@ -215,6 +218,7 @@ impl NakamotoBootPlan { fn apply_blocks_to_other_peers( burn_ops: &[BlockstackOperationType], blocks: &[NakamotoBlock], + malleablized_blocks: &[NakamotoBlock], other_peers: &mut [TestPeer], ) { info!("Applying block to other peers"; "block_height" => ?burn_ops.first().map(|op| op.block_height())); @@ -227,14 +231,26 @@ impl NakamotoBootPlan { let sort_tip = 
SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); + let mut possible_chain_tips = HashSet::new(); + for block in blocks { + debug!( + "Apply block {} (sighash {}) to peer {} ({})", + &block.block_id(), + &block.header.signer_signature_hash(), + i, + &peer.to_neighbor().addr + ); let block_id = block.block_id(); let accepted = Relayer::process_new_nakamoto_block( + &peer.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &peer.network.stacks_tip.block_id(), + &block, None, + NakamotoBlockObtainMethod::Pushed, ) .unwrap(); if accepted { @@ -246,19 +262,66 @@ impl NakamotoBootPlan { i ); } + + possible_chain_tips.insert(block.block_id()); + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + for block in malleablized_blocks { + debug!( + "Apply malleablized block {} (sighash {}) to peer {} ({})", + &block.block_id(), + &block.header.signer_signature_hash(), + i, + &peer.to_neighbor().addr + ); + let block_id = block.block_id(); + let accepted = Relayer::process_new_nakamoto_block( + &peer.network.burnchain, + &sortdb, + &mut sort_handle, + &mut node.chainstate, + &peer.network.stacks_tip.block_id(), + &block, + None, + NakamotoBlockObtainMethod::Pushed, + ) + .unwrap(); + if accepted { + test_debug!( + "Accepted malleablized Nakamoto block {block_id} to other peer {}", + i + ); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + panic!( + "Did NOT accept malleablized Nakamoto block {block_id} to other peer {}", + i + ); + } + + possible_chain_tips.insert(block.block_id()); + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } peer.sortdb = Some(sortdb); peer.stacks_node = Some(node); peer.refresh_burnchain_view(); + + assert!(possible_chain_tips.contains(&peer.network.stacks_tip.block_id())); } } /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
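For reference, callers reach the function below through the plan's builder API; the construction used by `inner_make_nakamoto_peers_from_invs` earlier in this diff looks like:

    // Builder-style construction of a boot plan (mirrors the usage above in
    // stackslib/src/net/tests/inv/nakamoto.rs).
    let plan = NakamotoBootPlan::new(test_name)
        .with_private_key(private_key)
        .with_pox_constants(rc_len, prepare_len)
        .with_initial_balances(initial_balances)
        .with_extra_peers(num_peers)
        .with_test_signers(test_signers)
        .with_test_stackers(test_stackers);
    let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(observer));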
- fn boot_nakamoto<'a>( + fn boot_nakamoto_peers<'a>( mut self, - aggregate_public_key: Point, observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec<TestPeer<'a>>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); @@ -275,7 +338,6 @@ impl NakamotoBootPlan { // first 25 blocks are boot-up // reward cycle 6 instantiates pox-3 // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); @@ -290,6 +352,7 @@ impl NakamotoBootPlan { peer_config .initial_balances .append(&mut self.initial_balances.clone()); + peer_config.connection_opts.block_proposal_token = Some("password".to_string()); // Create some balances for test Stackers // They need their stacking amount + enough to pay fees @@ -336,28 +399,38 @@ impl NakamotoBootPlan { let mut peer_nonce = 0; let mut other_peer_nonces = vec![0; other_peers.len()]; let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - + let mut sortition_height = peer.get_burn_block_height(); debug!("\n\n======================"); debug!("PoxConstants = {:#?}", &peer.config.burnchain.pox_constants); - debug!("tip = {}", tip.block_height); + debug!("tip = {}", sortition_height); debug!("========================\n\n"); - // advance to just past pox-3 unlock - let mut sortition_height = tip.block_height; while sortition_height - <= peer - .config - .burnchain - .pox_constants - .pox_4_activation_height - .into() - { + let epoch_25_height = peer + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = peer + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // advance to just past pox-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { peer.tenure_with_txs(&vec![], &mut peer_nonce); for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) @@ -365,12 +438,23 @@ impl NakamotoBootPlan { other_peer.tenure_with_txs(&vec![], other_peer_nonce); } - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; + sortition_height = peer.get_burn_block_height(); + blocks_produced = true; + } + + // need to produce at least 1 block before making pox-4 lockups: + // the way the `burn-block-height` constant works in Epoch 2.5 is such + // that if it's the first block produced, this will be 0, which will + // prevent the lockups from being valid.
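+        // (the single empty tenure produced below is enough to get `burn-block-height` past 0)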
+ if !blocks_produced { + peer.tenure_with_txs(&vec![], &mut peer_nonce); + for (other_peer, other_peer_nonce) in + other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) + { + other_peer.tenure_with_txs(&vec![], other_peer_nonce); + } + + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -391,15 +475,18 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { - let pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, reward_cycle.into(), &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, 12_u128, - u128::MAX, + max_amount, 1, ); make_pox_4_lockup( @@ -409,19 +496,39 @@ impl NakamotoBootPlan { &pox_addr, 12, &StacksPublicKey::from_private(&test_stacker.signer_private_key), - 34, + sortition_height + 1, Some(signature), - u128::MAX, + max_amount, 1, ) }) .collect(); + let old_tip = peer.network.stacks_tip.clone(); let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } + for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { + let old_tip = other_peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, other_peer.network.parent_stacks_tip); + } } debug!("\n\n======================"); @@ -432,47 +539,33 @@ impl NakamotoBootPlan { .burnchain .is_in_prepare_phase(sortition_height.into()) { + let old_tip = peer.network.stacks_tip.clone(); stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } other_peers .iter_mut() .zip(other_peer_nonces.iter_mut()) .for_each(|(peer, nonce)| { + let old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&[], nonce); - }); - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; - } - - debug!("\n\n======================"); - debug!("Vote for the Aggregate Key"); - debug!("========================\n\n"); - let target_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(sortition_height.into()) - .expect("Failed to get reward cycle") - + 1; - let vote_txs = with_sortdb(peer, |chainstate, sortdb| { - 
make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &stacks_block, - &mut self.test_signers, - &self.test_stackers, - target_cycle.into(), - ) - }); - - peer.tenure_with_txs(&vote_txs, &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&vote_txs, other_peer_nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } + }); + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -480,21 +573,34 @@ impl NakamotoBootPlan { debug!("========================\n\n"); // advance to the start of epoch 3.0 - while sortition_height - < Self::nakamoto_start_burn_height(&peer.config.burnchain.pox_constants) - { + while sortition_height < epoch_30_height - 1 { + let old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&vec![], &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } + for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { + let old_tip = peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&vec![], other_peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, other_peer.network.parent_stacks_tip); + } } - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -511,14 +617,17 @@ impl NakamotoBootPlan { let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); - let (mut peer, mut other_peers) = - self.boot_nakamoto(test_signers.aggregate_public_key.clone(), observer); + let (mut peer, mut other_peers) = self.boot_nakamoto_peers(observer); + if boot_plan.is_empty() { + debug!("No boot plan steps supplied -- returning once nakamoto epoch has been reached"); + return (peer, other_peers); + } let mut all_blocks = vec![]; + let mut malleablized_block_ids = HashSet::new(); let mut consensus_hashes = vec![]; let mut last_tenure_change: Option = None; let mut blocks_since_last_tenure = 0; - let stx_miner_key = peer.miner.nakamoto_miner_key(); debug!("\n\nProcess plan with {} steps", boot_plan.len()); @@ -605,12 +714,24 @@ impl NakamotoBootPlan { .map(|(block, _, _)| block) .collect(); + let malleablized_blocks = + std::mem::replace(&mut peer.malleablized_blocks, vec![]); + for mblk in malleablized_blocks.iter() { + malleablized_block_ids.insert(mblk.block_id()); + } + Self::check_blocks_against_boot_plan( &blocks, &boot_steps, 
num_expected_transactions, ); - Self::apply_blocks_to_other_peers(&burn_ops, &blocks, &mut other_peers); + + Self::apply_blocks_to_other_peers( + &burn_ops, + &blocks, + &malleablized_blocks, + &mut other_peers, + ); all_blocks.push(blocks); } NakamotoBootTenure::Sortition(boot_steps) => { @@ -651,29 +772,6 @@ impl NakamotoBootPlan { i += 1; let mut txs = vec![]; - // check if the stacker/signers need to vote for an aggregate key. if so, append those transactions - // to the end of the block. - // NOTE: this will only work the block after .signers is updated, because `make_all_signers_vote...` - // checks the chainstate as of `tip` to obtain the signer vector. this means that some tests may - // need to produce an extra block in a tenure in order to get the signer votes in place. - // The alternative to doing this would be to either manually build the signer vector or to refactor - // the testpeer such that a callback is provided during the actual mining of the block with a - // `ClarityBlockConnection`. - let mut voting_txs = if pox_constants.is_in_prepare_phase(first_burn_ht, burn_ht) { - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap().unwrap(); - let cycle_id = 1 + pox_constants.block_height_to_reward_cycle(first_burn_ht, burn_ht).unwrap(); - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.index_block_hash(), - &mut test_signers.clone(), - &test_stackers, - u128::from(cycle_id), - ) - } else { - vec![] - }; - let last_block_opt = blocks_so_far .last() .as_ref() @@ -705,9 +803,6 @@ impl NakamotoBootPlan { } } - num_expected_transactions += voting_txs.len(); - txs.append(&mut voting_txs); - blocks_since_last_tenure += 1; txs }); @@ -719,26 +814,44 @@ impl NakamotoBootPlan { .map(|(block, _, _)| block) .collect(); + let malleablized_blocks = + std::mem::replace(&mut peer.malleablized_blocks, vec![]); + for mblk in malleablized_blocks.iter() { + malleablized_block_ids.insert(mblk.block_id()); + } + Self::check_blocks_against_boot_plan( &blocks, &boot_steps, num_expected_transactions, ); - Self::apply_blocks_to_other_peers(&burn_ops, &blocks, &mut other_peers); + Self::apply_blocks_to_other_peers( + &burn_ops, + &blocks, + &malleablized_blocks, + &mut other_peers, + ); all_blocks.push(blocks); } } } + // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() .unwrap() - .unwrap(); + .unwrap(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; @@ -763,12 +876,24 @@ impl NakamotoBootPlan { // already checked that `all_blocks` matches the boot plan, so just check that each // transaction in `all_blocks` ran to completion if let Some(observer) = observer { - let observed_blocks = observer.get_blocks(); + let mut observed_blocks = observer.get_blocks(); let mut block_idx = (peer.config.burnchain.pox_constants.pox_4_activation_height + peer.config.burnchain.pox_constants.reward_cycle_length - 25) as usize; - for tenure in all_blocks { - for block in tenure { + + // filter out observed blocks that are malleablized + observed_blocks.retain(|blk| { + if let 
Some(nakamoto_block_header) = + blk.metadata.anchored_header.as_stacks_nakamoto() + { + !malleablized_block_ids.contains(&nakamoto_block_header.block_id()) + } else { + true + } + }); + + for tenure in all_blocks.iter() { + for block in tenure.iter() { + let observed_block = &observed_blocks[block_idx]; + block_idx += 1; @@ -809,9 +934,13 @@ impl NakamotoBootPlan { let chainstate = &mut other_peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = other_peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_highest_nakamoto_tenure( - chainstate.db(), - sort_db.conn(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -822,9 +951,14 @@ impl NakamotoBootPlan { assert_eq!(other_sort_tip, sort_tip); } + // flatten + let all_blocks: Vec<NakamotoBlock> = all_blocks.into_iter().flatten().collect(); + peer.check_nakamoto_migration(); + peer.check_malleablized_blocks(all_blocks.clone(), 2); for other_peer in other_peers.iter_mut() { other_peer.check_nakamoto_migration(); + other_peer.check_malleablized_blocks(all_blocks.clone(), 2); } (peer, other_peers) } @@ -954,11 +1088,18 @@ fn test_boot_nakamoto_peer() { NakamotoBootTenure::Sortition(vec![NakamotoBootStep::Block(vec![next_stx_transfer()])]), ]; + // make malleablized blocks + let (test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + let plan = NakamotoBootPlan::new(&function_name!()) .with_private_key(private_key) .with_pox_constants(10, 3) .with_initial_balances(vec![(addr.into(), 1_000_000)]) - .with_extra_peers(2); + .with_extra_peers(2) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers); let observer = TestEventObserver::new(); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs new file mode 100644 index 00000000000..e6a69f5dc02 --- /dev/null +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -0,0 +1,3737 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
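+ +//! Tests for epoch 2.x relay behavior: relayer peer sampling and ranking, plus +//! pushing and broadcasting of blocks, microblocks, and transactions between test peers.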
+ +use std::cell::RefCell; +use std::collections::HashMap; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use crate::burnchains::tests::TestMiner; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::db::PeerDB; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::p2p::*; +use crate::net::relay::*; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::{Error as net_error, *}; +use crate::util_lib::test::*; + +#[test] +fn test_sample_neighbors() { + let neighbors: Vec<_> = (0..10) + .map(|i| { + let nk = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: i, + }; + nk + }) + .collect(); + + let neighbors_set: HashSet<_> = neighbors.clone().into_iter().collect(); + + let empty_distribution: HashMap<NeighborKey, usize> = HashMap::new(); + + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 1).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 5).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 10).len(), + 0 + ); + + let flat_distribution: HashMap<_, _> = neighbors.iter().map(|nk| (nk.clone(), 1)).collect(); + + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 10) + .into_iter() + .collect(); + + assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); + + let biased_distribution: HashMap<_, _> =
neighbors + .iter() + .enumerate() + .map(|(i, nk)| (nk.clone(), if i == 0 { 10 } else { 1 })) + .collect(); + + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 10) + .into_iter() + .collect(); + + assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); +} + +#[test] +fn test_relayer_stats_add_relyed_messages() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + eprintln!("Test with {} transactions", all_transactions.len()); + + let nk = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + // never overflow recent messages for a neighbor + for (i, tx) in all_transactions.iter().enumerate() { + relay_stats.add_relayed_message(nk.clone(), tx); + + assert_eq!(relay_stats.recent_messages.len(), 1); + assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); + + assert_eq!(relay_stats.recent_updates.len(), 1); + } + + assert_eq!( + relay_stats.recent_messages.get(&nk).unwrap().len(), + MAX_RECENT_MESSAGES + ); + + for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { + let digest = all_transactions[i].get_digest(); + let mut found = false; + for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { + found = found || (*hash == digest); + } + if !found { + assert!(false); + } + } + + // never overflow number of neighbors tracked + for i in 0..(MAX_RELAYER_STATS + 1) { + let mut new_nk = nk.clone(); + new_nk.peer_version += i as u32; + + relay_stats.add_relayed_message(new_nk, &all_transactions[0]); + + assert!(relay_stats.recent_updates.len() <= i + 1); + assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); + } +} + +#[test] +fn test_relayer_merge_stats() { + let mut relayer_stats = RelayerStats::new(); + + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + public_key_hash: Hash160([0u8; 20]), + }; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: 1, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let now = get_epoch_time_secs() + 60; + + let relay_stats_2 = RelayStats { + num_messages: 2, + num_bytes: 2, + last_seen: now, + }; + + let mut rs = 
HashMap::new(); + rs.insert(na.clone(), relay_stats_2.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let relay_stats_3 = RelayStats { + num_messages: 3, + num_bytes: 3, + last_seen: 0, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats_3.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + for i in 0..(MAX_RELAYER_STATS + 1) { + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 14321 + (i as u16), + public_key_hash: Hash160([0u8; 20]), + }; + + let now = get_epoch_time_secs() + (i as u64) + 1; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: now, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); + } +} + +#[test] +fn test_relay_inbound_peer_rankings() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + StacksEpochId::latest(), + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54323, + }; + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 0); + + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 1); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), 
&all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 2); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + assert_eq!(*dups.get(&nk_2).unwrap(), 4); + + // total dups == 7 + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 0, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); + + // high warmup period + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 100, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); +} + +#[test] +fn test_relay_outbound_peer_rankings() { + let relay_stats = RelayerStats::new(); + + let asn1 = ASEntry4 { + prefix: 0x10000000, + mask: 8, + asn: 1, + org: 1, + }; + + let asn2 = ASEntry4 { + prefix: 0x20000000, + mask: 8, + asn: 2, + org: 2, + }; + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, + ]), + port: 54321, + }; + + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, + ]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, + ]), + port: 54323, + }; + + let n1 = Neighbor { + addr: nk_1.clone(), + public_key: Secp256k1PublicKey::from_hex( + "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 1, + org: 1, + in_degree: 0, + out_degree: 0, + }; + + let n2 = Neighbor { + addr: nk_2.clone(), + public_key: Secp256k1PublicKey::from_hex( + "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let n3 = Neighbor { + addr: nk_3.clone(), + public_key: Secp256k1PublicKey::from_hex( + "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let peerdb = PeerDB::connect_memory( + 0x80000000, + 0, + 4032, + UrlString::try_from("http://foo.com").unwrap(), + &vec![asn1, asn2], + &vec![n1.clone(), n2.clone(), n3.clone()], + ) + .unwrap(); + + let asn_count = RelayerStats::count_ASNs( + peerdb.conn(), + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + ) + .unwrap(); + assert_eq!(asn_count.len(), 3); + assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); + assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); + assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) + .unwrap(); + assert_eq!(ranking.len(), 3); + assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); + assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, 
&vec![nk_2.clone(), nk_3.clone()]) + .unwrap(); + assert_eq!(ranking.len(), 2); + assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_3_peers_push_available() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_3_peers_push_available", + 4200, + 3, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 3); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 1 downloads the blocks from peer 0, and sends + // BlocksAvailable and MicroblocksAvailable messages to + // peer 2. + peer_configs[1].connection_opts.disable_chat_neighbors = true; + + // peer 2 learns about the blocks and microblocks from peer 1's + // BlocksAvailable and MicroblocksAvailable messages, but + // not from inv syncs. + peer_configs[2].connection_opts.disable_chat_neighbors = true; + peer_configs[2].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + peer_configs[2].connection_opts.disable_natpunch = true; + + // do not push blocks and microblocks; only announce them + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[2].connection_opts.disable_block_push = true; + + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + peer_configs[2].connection_opts.disable_microblock_push = true; + + // generous timeouts + peer_configs[0].connection_opts.connect_timeout = 180; + peer_configs[1].connection_opts.connect_timeout = 180; + peer_configs[2].connection_opts.connect_timeout = 180; + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + peer_configs[2].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + let peer_2 = peer_configs[2].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + peer_configs[2].add_neighbor(&peer_1); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work.
+ let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + block_data + }, + |ref mut peers| { + // make sure peer 2's inv has an entry for peer 1, even + // though it's not doing an inv sync. This is required for the downloader to + // work, and for (Micro)BlocksAvailable messages to be accepted + let peer_1_nk = peers[1].to_neighbor().addr; + let peer_2_nk = peers[2].to_neighbor().addr; + let bc = peers[1].config.burnchain.clone(); + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { + stats.scans = 1; + stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); + stats.inv.merge_blocks_inv( + 0, + 30, + vec![0, 0, 0, 0, 0], + vec![0, 0, 0, 0, 0], + false, + ); + } else { + panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); + } + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + + inv_state + .get_stats_mut(&peer_1_nk) + .unwrap() + .inv + .num_reward_cycles = this_reward_cycle; + inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = vec![0x3f]; + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 2"); + } + } + + // peer 2 should never see a BlocksInv + // message.
That would imply it asked for an inv + for (_, convo) in peers[2].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + return false; + } + }; + + match peer.network.peers.get(&event_id) { + Some(convo) => { + return convo.is_authenticated(); + } + None => { + return false; + } + } +} + +fn push_message( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec<RelayData>, + msg: StacksMessageType, +) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + panic!("Unreachable peer: {:?}", dest); + } + }; + + let relay_msg = match peer.network.peers.get_mut(&event_id) { + Some(convo) => convo + .sign_relay_message( + &peer.network.local_peer, + &peer.network.chain_view, + relay_hints, + msg, + ) + .unwrap(), + None => { + panic!("No such event ID {} from neighbor {}", event_id, dest); + } + }; + + match peer.network.relay_signed_message(dest, relay_msg.clone()) { + Ok(_) => { + return true; + } + Err(net_error::OutboxOverflow) => { + test_debug!( + "{:?} outbox overflow; try again later", + &peer.to_neighbor().addr + ); + return false; + } + Err(net_error::SendError(msg)) => { + warn!( + "Failed to send to {:?}: SendError({})", + &peer.to_neighbor().addr, + msg + ); + return false; + } + Err(e) => { + test_debug!( + "{:?} encountered fatal error when forwarding: {:?}", + &peer.to_neighbor().addr, + &e + ); + assert!(false); + unreachable!(); + } + } +} + +fn http_rpc(peer_http: u16, request: StacksHttpRequest) -> Result<StacksHttpResponse, net_error> { + use std::net::TcpStream; + + let mut sock = TcpStream::connect( + &format!("127.0.0.1:{}", peer_http) + .parse::<SocketAddr>() + .unwrap(), + ) + .unwrap(); + + let request_bytes = request.try_serialize().unwrap(); + match sock.write_all(&request_bytes) { + Ok(_) => {} + Err(e) => { + test_debug!("Client failed to write: {:?}", &e); + return Err(net_error::WriteError(e)); + } + } + + let mut resp = vec![]; + match sock.read_to_end(&mut resp) { + Ok(_) => { + if resp.len() == 0 { + test_debug!("Client did not receive any data"); + return Err(net_error::PermanentlyDrained); + } + } + Err(e) => { + test_debug!("Client failed to read: {:?}", &e); + return Err(net_error::ReadError(e)); + } + } + + test_debug!("Client received {} bytes", resp.len()); + let response = StacksHttp::parse_response( + &request.preamble().verb, + &request.preamble().path_and_query_str, + &resp, + ) + .unwrap(); + match response { + StacksHttpMessage::Response(x) => Ok(x), + _ => { + panic!("Did not receive a Response"); + } + } +} + +pub fn broadcast_message( + broadcaster: &mut TestPeer, + relay_hints: Vec<RelayData>, + msg: StacksMessageType, +) -> bool { + let request = NetworkRequest::Broadcast(relay_hints, msg); + match broadcaster.network.dispatch_request(request) { + Ok(_) => true, + Err(e) => { + error!("Failed to broadcast: {:?}", &e); + false + } + } +} + +fn push_block( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec<RelayData>, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Push block {}/{} to {:?}", + peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + dest + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) +
.unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_block( + peer: &mut TestPeer, + relay_hints: Vec<RelayData>, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Broadcast block {}/{}", + peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) + .unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_microblocks( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec<RelayData>, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec<StacksMicroblock>, +) -> bool { + test_debug!( + "{:?}: Push {} microblocks for block {}/{} to {:?}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + dest + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_microblocks( + peer: &mut TestPeer, + relay_hints: Vec<RelayData>, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec<StacksMicroblock>, +) -> bool { + test_debug!( + "{:?}: broadcast {} microblocks for block {}/{}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_transaction( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec<RelayData>, + tx: StacksTransaction, +) -> bool { + test_debug!( + "{:?}: Push tx {} to {:?}", + peer.to_neighbor().addr, + tx.txid(), + dest + ); + let msg = StacksMessageType::Transaction(tx); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_transaction( + peer: &mut TestPeer, + relay_hints: Vec<RelayData>, + tx: StacksTransaction, +) -> bool { + test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); + let msg = StacksMessageType::Transaction(tx); + broadcast_message(peer, relay_hints, msg) +} + +fn http_get_info(http_port: u16) -> RPCPeerInfoData { + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "GET".to_string(), + "/v2/info".to_string(), + ); + request.keep_alive = false; + let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); + let response = http_rpc(http_port, getinfo).unwrap(); + let peer_info = response.decode_peer_info().unwrap(); + peer_info +} + +fn http_post_block(http_port: u16, consensus_hash: &ConsensusHash, block: &StacksBlock) -> bool { + test_debug!( + "upload block {}/{} to localhost:{}", + consensus_hash, + block.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/blocks".to_string(), + ); + request.keep_alive = false; + let post_block = + StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); + + let
response = http_rpc(http_port, post_block).unwrap(); + let accepted = response.decode_stacks_block_accepted().unwrap(); + accepted.accepted +} + +fn http_post_microblock( + http_port: u16, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + mblock: &StacksMicroblock, +) -> bool { + test_debug!( + "upload microblock {}/{}-{} to localhost:{}", + consensus_hash, + block_hash, + mblock.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/microblocks".to_string(), + ); + request.keep_alive = false; + let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let post_microblock = StacksHttpRequest::new( + request, + HttpRequestContents::new() + .payload_stacks(mblock) + .for_specific_tip(tip), + ); + + let response = http_rpc(http_port, post_microblock).unwrap(); + let payload = response.get_http_payload_ok().unwrap(); + let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); + return true; +} + +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( + outbound_test: bool, + disable_push: bool, +) { + with_timeout(600, move || { + let original_blocks_and_microblocks = RefCell::new(vec![]); + let blocks_and_microblocks = RefCell::new(vec![]); + let idx = RefCell::new(0); + let sent_blocks = RefCell::new(false); + let sent_microblocks = RefCell::new(false); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", + 4210, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks and pushes them to peer 1 + // peer 1 receives the blocks and microblocks. It + // doesn't download them, nor does it try to get invs + peer_configs[0].connection_opts.disable_block_advertisement = true; + + peer_configs[1].connection_opts.disable_inv_sync = true; + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // force usage of blocksavailable/microblocksavailable? + if disable_push { + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + } + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + + if outbound_test { + // neighbor relationship is symmetric -- peer 1 has an outbound connection + // to peer 0. 
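+ // (when `outbound_test` is false, peer 1 is not given peer 0 as a neighbor, + // and only hears from peer 0 over the connection that peer 0 opens to it)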
+ peer_configs[1].add_neighbor(&peer_0); + } + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec<StacksMicroblock>)> = + block_data + .clone() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| { + (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) + }) + .collect(); + *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); + *original_blocks_and_microblocks.borrow_mut() = saved_copy; + block_data + }, + |ref mut peers| { + if !disable_push { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = + BurnchainHeaderHash([0u8; 32]); + } + } + + // make sure peer 1's inv has an entry for peer 0, even + // though it's not doing an inv sync. This is required for the downloader to + // work + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[1].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_0_nk).is_none() { + test_debug!("initialize inv statistics for peer 0 in peer 1"); + inv_state.add_peer(peer_0_nk.clone(), true); + } else { + test_debug!("peer 1 has inv state for peer 0"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + if is_peer_connected(&peers[0], &peer_1_nk) { + // randomly push a block and/or microblocks to peer 1.
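+ // the block is pushed first; its microblocks are pushed only after the + // block send succeeds, and the pair is dequeued once both have been sent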
+ let mut block_data = blocks_and_microblocks.borrow_mut(); + let original_block_data = original_blocks_and_microblocks.borrow(); + let mut next_idx = idx.borrow_mut(); + let data_to_push = { + if block_data.len() > 0 { + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } else { + // start over (can happen if a message gets + // dropped due to a timeout) + test_debug!("Reset block transmission (possible timeout)"); + *block_data = (*original_block_data).clone(); + *next_idx = thread_rng().gen::<usize>() % block_data.len(); + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } + }; + + if let Some((consensus_hash, block, microblocks)) = data_to_push { + test_debug!( + "Push block {}/{} and microblocks", + &consensus_hash, + block.block_hash() + ); + + let block_hash = block.block_hash(); + let mut sent_blocks = sent_blocks.borrow_mut(); + let mut sent_microblocks = sent_microblocks.borrow_mut(); + + let pushed_block = if !*sent_blocks { + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash.clone(), + block, + ) + } else { + true + }; + + *sent_blocks = pushed_block; + + if pushed_block { + let pushed_microblock = if !*sent_microblocks { + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash, + block_hash, + microblocks, + ) + } else { + true + }; + + *sent_microblocks = pushed_microblock; + + if pushed_block && pushed_microblock { + block_data.remove(*next_idx); + if block_data.len() > 0 { + *next_idx = thread_rng().gen::<usize>() % block_data.len(); + } + *sent_blocks = false; + *sent_microblocks = false; + } + } + test_debug!("{} blocks/microblocks remaining", block_data.len()); + } + } + + // peer 0 should never see a GetBlocksInv message. + // peer 1 should never see a BlocksInv message + for (_, convo) in peers[0].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::GetBlocksInv), + 0 + ); + } + for (_, convo) in peers[1].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable.
+ // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_upload_blocks_http() { + with_timeout(600, || { + let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); + let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); + + std::thread::spawn(move || loop { + eprintln!("Get port"); + let remote_port: u16 = port_rx.recv().unwrap(); + eprintln!("Got port {}", remote_port); + + eprintln!("Send getinfo"); + let peer_info = http_get_info(remote_port); + eprintln!("Got getinfo! {:?}", &peer_info); + let idx = peer_info.stacks_tip_height as usize; + + eprintln!("Get blocks and microblocks"); + let blocks_and_microblocks: Vec<( + ConsensusHash, + Option<StacksBlock>, + Option<Vec<StacksMicroblock>>, + )> = block_rx.recv().unwrap(); + eprintln!("Got blocks and microblocks!"); + + if idx >= blocks_and_microblocks.len() { + eprintln!("Out of blocks to send!"); + return; + } + + eprintln!( + "Upload block {}", + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() + ); + http_post_block( + remote_port, + &blocks_and_microblocks[idx].0, + blocks_and_microblocks[idx].1.as_ref().unwrap(), + ); + for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { + eprintln!("Upload microblock {}", mblock.block_hash()); + http_post_microblock( + remote_port, + &blocks_and_microblocks[idx].0, + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), + mblock, + ); + } + }); + + let original_blocks_and_microblocks = RefCell::new(vec![]); + let port_sx_cell = RefCell::new(port_sx); + let block_sx_cell = RefCell::new(block_sx); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_upload_blocks_http", + 4250, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 0 sends them to peer 1 + peer_configs[1].connection_opts.disable_chat_neighbors = true; + peer_configs[1].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // generous timeouts + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work.
+ let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); + + block_data + }, + |ref mut peers| { + let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); + let remote_port = peers[1].config.http_port; + + let port_sx = port_sx_cell.borrow_mut(); + let block_sx = block_sx_cell.borrow_mut(); + + let _ = (*port_sx).try_send(remote_port); + let _ = (*block_sx).try_send(blocks_and_microblocks); + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn make_test_smart_contract_transaction( + peer: &mut TestPeer, + name: &str, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, +) -> StacksTransaction { + // make a smart contract + let contract = " + (define-data-var bar int 0) + (define-public (get-bar) (ok (var-get bar))) + (define-public (set-bar (x int) (y int)) + (begin (var-set bar (/ x y)) (ok (var-get bar))))"; + + let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); + + let tx_contract = peer + .with_mining_state( + |ref mut sortdb, ref mut miner, ref mut spending_account, ref mut stacks_node| { + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_account.as_transaction_auth().unwrap().into(), + TransactionPayload::new_smart_contract( + &name.to_string(), + &contract.to_string(), + None, + ) + .unwrap(), + ); + + let chain_tip = + StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let iconn = sortdb + .index_handle_at_block(&stacks_node.chainstate, &chain_tip) + .unwrap(); + let cur_nonce = stacks_node + .chainstate + .with_read_only_clarity_tx(&iconn, &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }) + .unwrap(); + + test_debug!( + "Nonce of {:?} is {} at {}/{}", + &spending_account.origin_address().unwrap(), + cur_nonce, + consensus_hash, + block_hash + ); + + // spending_account.set_nonce(cur_nonce + 1); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + spending_account.sign_as_origin(&mut tx_signer); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + + test_debug!( + "make transaction {:?} off of {:?}/{:?}: {:?}", + &tx_contract_signed.txid(), + consensus_hash, + block_hash, + &tx_contract_signed + ); + + Ok(tx_contract_signed) + }, + ) + .unwrap(); + + tx_contract +} + +#[test] +#[ignore]
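+// Peer 0 pushes blocks, microblocks, and transactions to peer 1; each pushed +// transaction must appear in peer 1's mempool before the next one is sent.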
+#[test]
+#[ignore]
+fn test_get_blocks_and_microblocks_2_peers_push_transactions() {
+    with_timeout(600, || {
+        let blocks_and_microblocks = RefCell::new(vec![]);
+        let blocks_idx = RefCell::new(0);
+        let sent_txs = RefCell::new(vec![]);
+        let done = RefCell::new(false);
+
+        let peers = run_get_blocks_and_microblocks(
+            "test_get_blocks_and_microblocks_2_peers_push_transactions",
+            4220,
+            2,
+            |ref mut peer_configs| {
+                // build initial network topology.
+                assert_eq!(peer_configs.len(), 2);
+
+                // peer 0 generates blocks and microblocks, and pushes
+                // them to peer 1.  Peer 0 also generates transactions
+                // and pushes them to peer 1.
+                peer_configs[0].connection_opts.disable_block_advertisement = true;
+
+                // let peer 0 drive this test, as before, by controlling
+                // when peer 1 sees blocks.
+                peer_configs[1].connection_opts.disable_inv_sync = true;
+                peer_configs[1].connection_opts.disable_block_download = true;
+                peer_configs[1].connection_opts.disable_block_advertisement = true;
+
+                peer_configs[0].connection_opts.outbox_maxlen = 100;
+                peer_configs[1].connection_opts.inbox_maxlen = 100;
+
+                // disable nat punches -- disconnect/reconnect
+                // clears inv state
+                peer_configs[0].connection_opts.disable_natpunch = true;
+                peer_configs[1].connection_opts.disable_natpunch = true;
+
+                let initial_balances = vec![
+                    (
+                        PrincipalData::from(
+                            peer_configs[0].spending_account.origin_address().unwrap(),
+                        ),
+                        1000000,
+                    ),
+                    (
+                        PrincipalData::from(
+                            peer_configs[1].spending_account.origin_address().unwrap(),
+                        ),
+                        1000000,
+                    ),
+                ];
+
+                peer_configs[0].initial_balances = initial_balances.clone();
+                peer_configs[1].initial_balances = initial_balances.clone();
+
+                let peer_0 = peer_configs[0].to_neighbor();
+                let peer_1 = peer_configs[1].to_neighbor();
+
+                peer_configs[0].add_neighbor(&peer_1);
+                peer_configs[1].add_neighbor(&peer_0);
+            },
+            |num_blocks, ref mut peers| {
+                let tip = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+                let this_reward_cycle = peers[0]
+                    .config
+                    .burnchain
+                    .block_height_to_reward_cycle(tip.block_height)
+                    .unwrap();
+
+                // build up block data to replicate
+                let mut block_data = vec![];
+                for b in 0..num_blocks {
+                    let tip = SortitionDB::get_canonical_burn_chain_tip(
+                        &peers[0].sortdb.as_ref().unwrap().conn(),
+                    )
+                    .unwrap();
+                    if peers[0]
+                        .config
+                        .burnchain
+                        .block_height_to_reward_cycle(tip.block_height)
+                        .unwrap()
+                        != this_reward_cycle
+                    {
+                        continue;
+                    }
+                    let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure();
+
+                    let (_, burn_header_hash, consensus_hash) =
+                        peers[0].next_burnchain_block(burn_ops.clone());
+                    peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks);
+
+                    TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash);
+
+                    for i in 1..peers.len() {
+                        peers[i].next_burnchain_block_raw(burn_ops.clone());
+                        if b == 0 {
+                            // prime with first block
+                            peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]);
+                        }
+                    }
+
+                    let sn = SortitionDB::get_canonical_burn_chain_tip(
+                        &peers[0].sortdb.as_ref().unwrap().conn(),
+                    )
+                    .unwrap();
+                    block_data.push((
+                        sn.consensus_hash.clone(),
+                        Some(stacks_block),
+                        Some(microblocks),
+                    ));
+                }
+                *blocks_and_microblocks.borrow_mut() = block_data
+                    .clone()
+                    .drain(..)
+                    .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap()))
+                    .collect();
+                block_data
+            },
+            |ref mut peers| {
+                let peer_0_nk = peers[0].to_neighbor().addr;
+                let peer_1_nk = peers[1].to_neighbor().addr;
+
+                // peers must be connected to each other
+                let mut peer_0_to_1 = false;
+                let mut peer_1_to_0 = false;
+                for (nk, event_id) in peers[0].network.events.iter() {
+                    match peers[0].network.peers.get(event_id) {
+                        Some(convo) => {
+                            if *nk == peer_1_nk {
+                                peer_0_to_1 = true;
+                            }
+                        }
+                        None => {}
+                    }
+                }
+                for (nk, event_id) in peers[1].network.events.iter() {
+                    match peers[1].network.peers.get(event_id) {
+                        Some(convo) => {
+                            if *nk == peer_0_nk {
+                                peer_1_to_0 = true;
+                            }
+                        }
+                        None => {}
+                    }
+                }
+
+                if !peer_0_to_1 || !peer_1_to_0 {
+                    test_debug!(
+                        "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}",
+                        peer_0_to_1,
+                        peer_1_to_0
+                    );
+                    return;
+                }
+
+                // make sure peer 1's inv has an entry for peer 0, even
+                // though it's not doing an inv sync.
+                match peers[1].network.inv_state {
+                    Some(ref mut inv_state) => {
+                        if inv_state.get_stats(&peer_0_nk).is_none() {
+                            test_debug!("initialize inv statistics for peer 0 in peer 1");
+                            inv_state.add_peer(peer_0_nk, true);
+                        } else {
+                            test_debug!("peer 1 has inv state for peer 0");
+                        }
+                    }
+                    None => {
+                        test_debug!("No inv state for peer 1");
+                    }
+                }
+
+                let done_flag = *done.borrow();
+                if is_peer_connected(&peers[0], &peer_1_nk) {
+                    // only submit the next transaction if the previous
+                    // one is accepted
+                    let has_last_transaction = {
+                        let expected_txs: std::cell::Ref<'_, Vec<StacksTransaction>> =
+                            sent_txs.borrow();
+                        if let Some(tx) = (*expected_txs).last() {
+                            let txid = tx.txid();
+                            if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) {
+                                debug!("Peer 1 still waiting for transaction {}", &txid);
+                                push_transaction(&mut peers[0], &peer_1_nk, vec![], (*tx).clone());
+                                false
+                            } else {
+                                true
+                            }
+                        } else {
+                            true
+                        }
+                    };
+
+                    if has_last_transaction {
+                        // push blocks and microblocks in order, and push a
+                        // transaction that can only be validated once the
+                        // block and microblocks are processed.
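+                        //
+                        // Note on the index bookkeeping below: the microblock
+                        // stream is taken from entry `idx`, then `idx` is
+                        // advanced (wrapping back to 1, not 0, since entry 0
+                        // was already applied when the network was primed),
+                        // and the anchored block is taken from the new `idx`.
+                        // Each round therefore pushes the microblocks built
+                        // off block N together with block N+1, which confirms
+                        // them.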
+                        let (
+                            (
+                                block_consensus_hash,
+                                block,
+                                microblocks_consensus_hash,
+                                microblocks_block_hash,
+                                microblocks,
+                            ),
+                            idx,
+                        ) = {
+                            let block_data = blocks_and_microblocks.borrow();
+                            let mut idx = blocks_idx.borrow_mut();
+
+                            let microblocks = block_data[*idx].2.clone();
+                            let microblocks_consensus_hash = block_data[*idx].0.clone();
+                            let microblocks_block_hash = block_data[*idx].1.block_hash();
+
+                            *idx += 1;
+                            if *idx >= block_data.len() {
+                                *idx = 1;
+                            }
+
+                            let block = block_data[*idx].1.clone();
+                            let block_consensus_hash = block_data[*idx].0.clone();
+                            (
+                                (
+                                    block_consensus_hash,
+                                    block,
+                                    microblocks_consensus_hash,
+                                    microblocks_block_hash,
+                                    microblocks,
+                                ),
+                                *idx,
+                            )
+                        };
+
+                        if !done_flag {
+                            test_debug!(
+                                "Push microblocks built by {}/{} (idx={})",
+                                &microblocks_consensus_hash,
+                                &microblocks_block_hash,
+                                idx
+                            );
+
+                            let block_hash = block.block_hash();
+                            push_microblocks(
+                                &mut peers[0],
+                                &peer_1_nk,
+                                vec![],
+                                microblocks_consensus_hash,
+                                microblocks_block_hash,
+                                microblocks,
+                            );
+
+                            test_debug!(
+                                "Push block {}/{} and microblocks (idx = {})",
+                                &block_consensus_hash,
+                                block.block_hash(),
+                                idx
+                            );
+                            push_block(
+                                &mut peers[0],
+                                &peer_1_nk,
+                                vec![],
+                                block_consensus_hash.clone(),
+                                block,
+                            );
+
+                            // create a transaction against the resulting
+                            // (anchored) chain tip
+                            let tx = make_test_smart_contract_transaction(
+                                &mut peers[0],
+                                &format!("test-contract-{}", &block_hash.to_hex()[0..10]),
+                                &block_consensus_hash,
+                                &block_hash,
+                            );
+
+                            // push or post
+                            push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone());
+
+                            let mut expected_txs = sent_txs.borrow_mut();
+                            expected_txs.push(tx);
+                        } else {
+                            test_debug!("Done pushing data");
+                        }
+                    }
+                }
+
+                // peer 0 should never see a GetBlocksInv message.
+                // peer 1 should never see a BlocksInv message
+                for (_, convo) in peers[0].network.peers.iter() {
+                    assert_eq!(
+                        convo
+                            .stats
+                            .get_message_recv_count(StacksMessageID::GetBlocksInv),
+                        0
+                    );
+                }
+                for (_, convo) in peers[1].network.peers.iter() {
+                    assert_eq!(
+                        convo
+                            .stats
+                            .get_message_recv_count(StacksMessageID::BlocksInv),
+                        0
+                    );
+                }
+            },
+            |ref peer| {
+                // check peer health
+                // nothing should break
+                // TODO
+                true
+            },
+            |ref mut peers| {
+                // all blocks downloaded.
only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer 1 has {} txs", txs.len()); + txs.len() == sent_txs.borrow().len() + }, + ); + + // peer 1 should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + + let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + let expected_txs = sent_txs.into_inner(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peer 1 should have 1 tx per chain tip + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs.iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[1].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + test_debug!( + "Check {}/{} (height {}): expect {}", + &consensus_hash, + &block_hash, + block.header.total_work.work, + &sent_tx.txid() + ); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_peers_broadcast() { + with_timeout(600, || { + let blocks_and_microblocks = RefCell::new(vec![]); + let blocks_idx = RefCell::new(0); + let sent_txs = RefCell::new(vec![]); + let done = RefCell::new(false); + let num_peers = 3; + let privk = StacksPrivateKey::new(); + + let peers = run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_peers_broadcast", + 4230, + num_peers, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), num_peers); + + // peer 0 generates blocks and microblocks, and pushes + // them to peers 1..n. Peer 0 also generates transactions + // and broadcasts them to the network. + + peer_configs[0].connection_opts.disable_inv_sync = true; + peer_configs[0].connection_opts.disable_inv_chat = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state. 
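+            // The per-peer overrides below are sized so a full mesh of
+            // `num_peers` nodes never hits a connection cap: each host may
+            // accept up to (num_peers + 1) * max_inflight_blocks clients, and
+            // the (soft) neighbor budget is num_peers + 1.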
+            for i in 0..peer_configs.len() {
+                peer_configs[i].connection_opts.disable_natpunch = true;
+                peer_configs[i].connection_opts.disable_network_prune = true;
+                peer_configs[i].connection_opts.timeout = 600;
+                peer_configs[i].connection_opts.connect_timeout = 600;
+
+                // do one walk
+                peer_configs[i].connection_opts.num_initial_walks = 0;
+                peer_configs[i].connection_opts.walk_retry_count = 0;
+                peer_configs[i].connection_opts.walk_interval = 600;
+
+                // don't throttle downloads
+                peer_configs[i].connection_opts.download_interval = 0;
+                peer_configs[i].connection_opts.inv_sync_interval = 0;
+
+                let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks;
+                peer_configs[i].connection_opts.max_clients_per_host =
+                    ((num_peers + 1) as u64) * max_inflight;
+                peer_configs[i].connection_opts.soft_max_clients_per_host =
+                    ((num_peers + 1) as u64) * max_inflight;
+                peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64;
+                peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64;
+            }
+
+            let initial_balances = vec![(
+                PrincipalData::from(peer_configs[0].spending_account.origin_address().unwrap()),
+                1000000,
+            )];
+
+            for i in 0..peer_configs.len() {
+                peer_configs[i].initial_balances = initial_balances.clone();
+            }
+
+            // connectivity
+            let peer_0 = peer_configs[0].to_neighbor();
+            for i in 1..peer_configs.len() {
+                peer_configs[i].add_neighbor(&peer_0);
+                let peer_i = peer_configs[i].to_neighbor();
+                peer_configs[0].add_neighbor(&peer_i);
+            }
+        },
+        |num_blocks, ref mut peers| {
+            let tip = SortitionDB::get_canonical_burn_chain_tip(
+                &peers[0].sortdb.as_ref().unwrap().conn(),
+            )
+            .unwrap();
+            let this_reward_cycle = peers[0]
+                .config
+                .burnchain
+                .block_height_to_reward_cycle(tip.block_height)
+                .unwrap();
+
+            // build up block data to replicate
+            let mut block_data = vec![];
+            for _ in 0..num_blocks {
+                let tip = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+                if peers[0]
+                    .config
+                    .burnchain
+                    .block_height_to_reward_cycle(tip.block_height)
+                    .unwrap()
+                    != this_reward_cycle
+                {
+                    continue;
+                }
+                let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure();
+
+                let (_, burn_header_hash, consensus_hash) =
+                    peers[0].next_burnchain_block(burn_ops.clone());
+                peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks);
+
+                TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash);
+
+                for i in 1..peers.len() {
+                    peers[i].next_burnchain_block_raw(burn_ops.clone());
+                }
+
+                let sn = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+
+                block_data.push((
+                    sn.consensus_hash.clone(),
+                    Some(stacks_block),
+                    Some(microblocks),
+                ));
+            }
+            *blocks_and_microblocks.borrow_mut() = block_data
+                .clone()
+                .drain(..)
+ .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let done_flag = *done.borrow(); + + let mut connectivity_0_to_n = HashSet::new(); + let mut connectivity_n_to_0 = HashSet::new(); + + let peer_0_nk = peers[0].to_neighbor().addr; + + for (nk, event_id) in peers[0].network.events.iter() { + if let Some(convo) = peers[0].network.peers.get(event_id) { + if convo.is_authenticated() { + connectivity_0_to_n.insert(nk.clone()); + } + } + } + for i in 1..peers.len() { + for (nk, event_id) in peers[i].network.events.iter() { + if *nk != peer_0_nk { + continue; + } + + if let Some(convo) = peers[i].network.peers.get(event_id) { + if convo.is_authenticated() { + if let Some(inv_state) = &peers[i].network.inv_state { + if let Some(inv_stats) = inv_state.block_stats.get(&peer_0_nk) { + if inv_stats.inv.num_reward_cycles >= 5 { + connectivity_n_to_0.insert(peers[i].to_neighbor().addr); + } + } + } + } + } + } + } + + if connectivity_0_to_n.len() < peers.len() - 1 + || connectivity_n_to_0.len() < peers.len() - 1 + { + test_debug!( + "Network not connected: 0 --> N = {}, N --> 0 = {}", + connectivity_0_to_n.len(), + connectivity_n_to_0.len() + ); + return; + } + + let ((tip_consensus_hash, tip_block, _), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let idx = blocks_idx.borrow(); + (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) + }; + + if idx > 0 { + let mut caught_up = true; + for i in 1..peers.len() { + peers[i] + .with_db_state(|sortdb, chainstate, relayer, mempool| { + let (canonical_consensus_hash, canonical_block_hash) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .unwrap(); + + if canonical_consensus_hash != tip_consensus_hash + || canonical_block_hash != tip_block.block_hash() + { + debug!( + "Peer {} is not caught up yet (at {}/{}, need {}/{})", + i + 1, + &canonical_consensus_hash, + &canonical_block_hash, + &tip_consensus_hash, + &tip_block.block_hash() + ); + caught_up = false; + } + Ok(()) + }) + .unwrap(); + } + if !caught_up { + return; + } + } + + // caught up! 
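+                // every follower now reports the same canonical Stacks tip as
+                // the last block we broadcast, so it is safe to move on to the
+                // next block; this gives the followers' relayers time to drain
+                // the buffered data forced by the burnchain-tip reset above.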
+ // find next block + let ((consensus_hash, block, microblocks), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let mut idx = blocks_idx.borrow_mut(); + if *idx >= block_data.len() { + test_debug!("Out of blocks and microblocks to push"); + return; + } + + let ret = block_data[*idx].clone(); + *idx += 1; + (ret, *idx) + }; + + if !done_flag { + test_debug!( + "Broadcast block {}/{} and microblocks (idx = {})", + &consensus_hash, + block.block_hash(), + idx + ); + + let block_hash = block.block_hash(); + + // create a transaction against the current + // (anchored) chain tip + let tx = make_test_smart_contract_transaction( + &mut peers[0], + &format!("test-contract-{}", &block_hash.to_hex()[0..10]), + &tip_consensus_hash, + &tip_block.block_hash(), + ); + + let mut expected_txs = sent_txs.borrow_mut(); + expected_txs.push(tx.clone()); + + test_debug!( + "Broadcast {}/{} and its microblocks", + &consensus_hash, + &block.block_hash() + ); + // next block + broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); + broadcast_microblocks( + &mut peers[0], + vec![], + consensus_hash, + block_hash, + microblocks, + ); + + // NOTE: first transaction will be dropped since the other nodes haven't + // processed the first-ever Stacks block when their relayer code gets + // around to considering it. + broadcast_transaction(&mut peers[0], vec![], tx); + } else { + test_debug!("Done pushing data"); + } + }, + |ref peer| { + // check peer health -- no message errors + // (i.e. no relay cycles) + for (_, convo) in peer.network.peers.iter() { + assert_eq!(convo.stats.msgs_err, 0); + } + true + }, + |ref mut peers| { + // all blocks downloaded. only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let mut ret = true; + for i in 1..peers.len() { + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer {} has {} txs", i + 1, txs.len()); + ret = ret && txs.len() == sent_txs.borrow().len() - 1; + } + ret + }, + ); + + // peers 1..n should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + let expected_txs = sent_txs.into_inner(); + + for i in 1..peers.len() { + let txs = MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peers 1..n should have 1 tx per chain tip (except for the first block) + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[i].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_antientropy() { + with_timeout(600, move || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_antientropy", + 4240, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 mines blocks, but does not advertize them nor announce them as + // available via its inventory. 
It only uses its anti-entropy protocol to
+            // discover that peer 1 doesn't have them, and sends them to peer 1 that way.
+            peer_configs[0].connection_opts.disable_block_advertisement = true;
+            peer_configs[0].connection_opts.disable_block_download = true;
+
+            peer_configs[1].connection_opts.disable_block_download = true;
+            peer_configs[1].connection_opts.disable_block_advertisement = true;
+
+            // disable nat punches -- disconnect/reconnect
+            // clears inv state
+            peer_configs[0].connection_opts.disable_natpunch = true;
+            peer_configs[1].connection_opts.disable_natpunch = true;
+
+            // permit anti-entropy protocol even if nat'ed
+            peer_configs[0].connection_opts.antientropy_public = true;
+            peer_configs[1].connection_opts.antientropy_public = true;
+            peer_configs[0].connection_opts.antientropy_retry = 1;
+            peer_configs[1].connection_opts.antientropy_retry = 1;
+
+            // make peer 0 go slowly
+            peer_configs[0].connection_opts.max_block_push = 2;
+            peer_configs[0].connection_opts.max_microblock_push = 2;
+
+            let peer_0 = peer_configs[0].to_neighbor();
+            let peer_1 = peer_configs[1].to_neighbor();
+
+            // peer 0 is inbound to peer 1
+            peer_configs[0].add_neighbor(&peer_1);
+            peer_configs[1].add_neighbor(&peer_0);
+        },
+        |num_blocks, ref mut peers| {
+            let tip = SortitionDB::get_canonical_burn_chain_tip(
+                &peers[0].sortdb.as_ref().unwrap().conn(),
+            )
+            .unwrap();
+            let this_reward_cycle = peers[0]
+                .config
+                .burnchain
+                .block_height_to_reward_cycle(tip.block_height)
+                .unwrap();
+
+            // build up block data to replicate
+            let mut block_data = vec![];
+            for _ in 0..num_blocks {
+                let tip = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+                if peers[0]
+                    .config
+                    .burnchain
+                    .block_height_to_reward_cycle(tip.block_height)
+                    .unwrap()
+                    != this_reward_cycle
+                {
+                    continue;
+                }
+                let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure();
+
+                let (_, burn_header_hash, consensus_hash) =
+                    peers[0].next_burnchain_block(burn_ops.clone());
+                peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks);
+
+                TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash);
+
+                for i in 1..peers.len() {
+                    peers[i].next_burnchain_block_raw(burn_ops.clone());
+                }
+
+                let sn = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+                block_data.push((
+                    sn.consensus_hash.clone(),
+                    Some(stacks_block),
+                    Some(microblocks),
+                ));
+            }
+
+            // cap with an empty sortition, so the antientropy protocol picks up all stacks
+            // blocks
+            let (_, burn_header_hash, consensus_hash) = peers[0].next_burnchain_block(vec![]);
+            for i in 1..peers.len() {
+                peers[i].next_burnchain_block_raw(vec![]);
+            }
+            let sn = SortitionDB::get_canonical_burn_chain_tip(
+                &peers[0].sortdb.as_ref().unwrap().conn(),
+            )
+            .unwrap();
+            block_data.push((sn.consensus_hash.clone(), None, None));
+
+            block_data
+        },
+        |ref mut peers| {
+            for peer in peers.iter_mut() {
+                // force peers to keep trying to process buffered data
+                peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]);
+            }
+
+            let tip_opt = peers[1]
+                .with_db_state(|sortdb, chainstate, _, _| {
+                    let tip_opt =
+                        NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)
+                            .unwrap();
+                    Ok(tip_opt)
+                })
+                .unwrap();
+        },
+        |ref peer| {
+            // check peer health
+            // nothing should break
+            // TODO
+            true
+        },
+        |_| true,
+    );
+    })
+}
+
+#[test]
+#[ignore]
+fn test_get_blocks_and_microblocks_2_peers_buffered_messages() {
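+    // Outline: peer 0 withholds its inventory and pushes blocks for sortitions
+    // that peer 1 has not processed yet.  Peer 1 must therefore *buffer* the
+    // pushed messages (see the `pending_messages` check below) and can only
+    // apply them once the corresponding sortition is replayed for it via
+    // `next_burnchain_block_raw`.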
+    with_timeout(600, move || {
+        let sortitions = RefCell::new(vec![]);
+        let blocks_and_microblocks = RefCell::new(vec![]);
+        let idx = RefCell::new(0usize);
+        let pushed_idx = RefCell::new(0usize);
+        run_get_blocks_and_microblocks(
+            "test_get_blocks_and_microblocks_2_peers_buffered_messages",
+            4242,
+            2,
+            |ref mut peer_configs| {
+                // build initial network topology.
+                assert_eq!(peer_configs.len(), 2);
+
+                // peer 0 mines blocks, but it does not present its inventory.
+                peer_configs[0].connection_opts.disable_inv_chat = true;
+                peer_configs[0].connection_opts.disable_block_download = true;
+
+                peer_configs[1].connection_opts.disable_block_download = true;
+                peer_configs[1].connection_opts.disable_block_advertisement = true;
+
+                // disable nat punches -- disconnect/reconnect
+                // clears inv state
+                peer_configs[0].connection_opts.disable_natpunch = true;
+                peer_configs[1].connection_opts.disable_natpunch = true;
+
+                // peer 0 ignores peer 1's handshakes
+                peer_configs[0].connection_opts.disable_inbound_handshakes = true;
+
+                // disable anti-entropy
+                peer_configs[0].connection_opts.max_block_push = 0;
+                peer_configs[0].connection_opts.max_microblock_push = 0;
+
+                let peer_0 = peer_configs[0].to_neighbor();
+                let peer_1 = peer_configs[1].to_neighbor();
+
+                // peer 0 is inbound to peer 1
+                peer_configs[0].add_neighbor(&peer_1);
+                peer_configs[1].add_neighbor(&peer_0);
+            },
+            |num_blocks, ref mut peers| {
+                let tip = SortitionDB::get_canonical_burn_chain_tip(
+                    &peers[0].sortdb.as_ref().unwrap().conn(),
+                )
+                .unwrap();
+                let this_reward_cycle = peers[0]
+                    .config
+                    .burnchain
+                    .block_height_to_reward_cycle(tip.block_height)
+                    .unwrap();
+
+                // build up block data to replicate
+                let mut block_data = vec![];
+                for block_num in 0..num_blocks {
+                    let tip = SortitionDB::get_canonical_burn_chain_tip(
+                        &peers[0].sortdb.as_ref().unwrap().conn(),
+                    )
+                    .unwrap();
+                    let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure();
+
+                    let (_, burn_header_hash, consensus_hash) =
+                        peers[0].next_burnchain_block(burn_ops.clone());
+                    peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks);
+
+                    TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash);
+
+                    if block_num == 0 {
+                        for i in 1..peers.len() {
+                            peers[i].next_burnchain_block_raw(burn_ops.clone());
+                            peers[i].process_stacks_epoch_at_tip(&stacks_block, &microblocks);
+                        }
+                    } else {
+                        let mut all_sortitions = sortitions.borrow_mut();
+                        all_sortitions.push(burn_ops.clone());
+                    }
+
+                    let sn = SortitionDB::get_canonical_burn_chain_tip(
+                        &peers[0].sortdb.as_ref().unwrap().conn(),
+                    )
+                    .unwrap();
+                    block_data.push((
+                        sn.consensus_hash.clone(),
+                        Some(stacks_block),
+                        Some(microblocks),
+                    ));
+                }
+                *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..]
+                    .to_vec()
+                    .drain(..)
+ .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let mut i = idx.borrow_mut(); + let mut pushed_i = pushed_idx.borrow_mut(); + let all_sortitions = sortitions.borrow(); + let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + + let tip_opt = peers[1] + .with_db_state(|sortdb, chainstate, _, _| { + let tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + Ok(tip_opt) + }) + .unwrap(); + + if !is_peer_connected(&peers[0], &peer_1_nk) { + debug!("Peer 0 not connected to peer 1"); + return; + } + + if let Some(tip) = tip_opt { + debug!( + "Push at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *pushed_i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *pushed_i as u64 + { + // next block + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.clone(), + ); + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), + (*all_blocks_and_microblocks)[*pushed_i].2.clone(), + ); + *pushed_i += 1; + } + debug!( + "Sortition at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *i as u64 + { + let event_id = { + let mut ret = 0; + for (nk, event_id) in peers[1].network.events.iter() { + ret = *event_id; + break; + } + if ret == 0 { + return; + } + ret + }; + let mut update_sortition = false; + for (event_id, pending) in peers[1].network.pending_messages.iter() { + debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); + if pending.len() >= 1 { + update_sortition = true; + } + } + if update_sortition { + debug!("Advance sortition!"); + peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); + *i += 1; + } + } + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +pub fn make_contract_tx( + sender: &StacksPrivateKey, + cur_nonce: u64, + tx_fee: u64, + name: &str, + contract: &str, +) -> StacksTransaction { + let sender_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) + .expect("Failed to create p2pkh spending condition from public key."); + + let spending_auth = TransactionAuth::Standard(sender_spending_condition); + + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_auth.clone(), + TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) + .unwrap(), + ); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(tx_fee); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + tx_signer.sign_origin(sender).unwrap(); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + tx_contract_signed +} + +#[test] +fn test_static_problematic_tests() { + let 
spender_sk_1 = StacksPrivateKey::new();
+    let spender_sk_2 = StacksPrivateKey::new();
+    let spender_sk_3 = StacksPrivateKey::new();
+
+    let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1;
+    let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize);
+    let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize);
+    let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end);
+
+    let tx_edge = make_contract_tx(
+        &spender_sk_1,
+        0,
+        (tx_edge_body.len() * 100) as u64,
+        "test-edge",
+        &tx_edge_body,
+    );
+
+    // something just over the limit of the expression depth
+    let exceeds_repeat_factor = edge_repeat_factor + 1;
+    let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize);
+    let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize);
+    let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end);
+
+    let tx_exceeds = make_contract_tx(
+        &spender_sk_2,
+        0,
+        (tx_exceeds_body.len() * 100) as u64,
+        "test-exceeds",
+        &tx_exceeds_body,
+    );
+
+    // something stupidly high over the expression depth
+    let high_repeat_factor = 128 * 1024;
+    let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize);
+    let tx_high_body_end = "} ".repeat(high_repeat_factor as usize);
+    let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end);
+
+    let tx_high = make_contract_tx(
+        &spender_sk_3,
+        0,
+        (tx_high_body.len() * 100) as u64,
+        "test-high",
+        &tx_high_body,
+    );
+    assert!(Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_edge,
+        ASTRules::Typical
+    )
+    .is_ok());
+    assert!(Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_exceeds,
+        ASTRules::Typical
+    )
+    .is_ok());
+    assert!(Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_high,
+        ASTRules::Typical
+    )
+    .is_ok());
+
+    assert!(Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_edge,
+        ASTRules::PrecheckSize
+    )
+    .is_ok());
+    assert!(!Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_exceeds,
+        ASTRules::PrecheckSize
+    )
+    .is_ok());
+    assert!(!Relayer::static_check_problematic_relayed_tx(
+        false,
+        StacksEpochId::Epoch2_05,
+        &tx_high,
+        ASTRules::PrecheckSize
+    )
+    .is_ok());
+}
+
+#[test]
+fn process_new_blocks_rejects_problematic_asts() {
+    let privk = StacksPrivateKey::from_hex(
+        "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01",
+    )
+    .unwrap();
+    let addr = StacksAddress::from_public_keys(
+        C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
+        &AddressHashMode::SerializeP2PKH,
+        1,
+        &vec![StacksPublicKey::from_private(&privk)],
+    )
+    .unwrap();
+
+    let initial_balances = vec![(addr.to_account_principal(), 100000000000)];
+
+    let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020);
+    peer_config.initial_balances = initial_balances;
+    peer_config.epochs = Some(vec![
+        StacksEpoch {
+            epoch_id: StacksEpochId::Epoch20,
+            start_height: 0,
+            end_height: 1,
+            block_limit: ExecutionCost::max_value(),
+            network_epoch: PEER_VERSION_EPOCH_2_0,
+        },
+        StacksEpoch {
+            epoch_id: StacksEpochId::Epoch2_05,
+            start_height: 1,
+            end_height: i64::MAX as u64,
+            block_limit: ExecutionCost::max_value(),
+            network_epoch: PEER_VERSION_EPOCH_2_05,
+        },
+    ]);
+    let burnchain = peer_config.burnchain.clone();
+
+    // activate new AST rules right away
+    let mut peer = TestPeer::new(peer_config);
+    let mut
sortdb = peer.sortdb.take().unwrap(); + { + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + peer.sortdb = Some(sortdb); + + let chainstate_path = peer.chainstate_path.clone(); + + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + + let bad_tx = make_contract_tx( + &privk, + 0, + (tx_high_body.len() * 100) as u64, + "test-high", + &tx_high_body, + ); + let bad_txid = bad_tx.txid(); + let bad_tx_len = { + let mut bytes = vec![]; + bad_tx.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let mblock_privk = StacksPrivateKey::new(); + + // make one tenure with a valid block, but problematic microblocks + let (burn_ops, block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let coinbase_tx = make_coinbase(miner, 0); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + + let block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + vec![coinbase_tx.clone()], + ) + .unwrap() + .0; + + (block, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = 
SortitionDB::get_block_snapshot_for_winning_stacks_block(
+                        &ic,
+                        &tip.sortition_id,
+                        &block.block_hash(),
+                    )
+                    .unwrap()
+                    .unwrap(); // succeeds because we don't fork
+                    StacksChainState::get_anchored_block_header_info(
+                        chainstate.db(),
+                        &snapshot.consensus_hash,
+                        &snapshot.winning_stacks_block_hash,
+                    )
+                    .unwrap()
+                    .unwrap()
+                }
+            };
+
+            let parent_header_hash = parent_tip.anchored_header.block_hash();
+            let parent_consensus_hash = parent_tip.consensus_hash.clone();
+            let parent_index_hash = StacksBlockHeader::make_index_block_hash(
+                &parent_consensus_hash,
+                &parent_header_hash,
+            );
+            let coinbase_tx = make_coinbase(miner, 0);
+
+            let mblock_privk = miner.next_microblock_privkey();
+            let block_builder = StacksBlockBuilder::make_regtest_block_builder(
+                &burnchain,
+                &parent_tip,
+                vrf_proof.clone(),
+                tip.total_burn,
+                Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)),
+            )
+            .unwrap();
+
+            // this tx would be problematic without our checks
+            if let Err(ChainstateError::ProblematicTransaction(txid)) =
+                StacksBlockBuilder::make_anchored_block_from_txs(
+                    block_builder,
+                    chainstate,
+                    &sortdb.index_handle(&tip.sortition_id),
+                    vec![coinbase_tx.clone(), bad_tx.clone()],
+                )
+            {
+                assert_eq!(txid, bad_txid);
+            } else {
+                panic!("Did not get Error::ProblematicTransaction");
+            }
+
+            // make a bad block anyway
+            // don't worry about the state root
+            let block_builder = StacksBlockBuilder::make_regtest_block_builder(
+                &burnchain,
+                &parent_tip,
+                vrf_proof.clone(),
+                tip.total_burn,
+                Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)),
+            )
+            .unwrap();
+            let bad_block = StacksBlockBuilder::make_anchored_block_from_txs(
+                block_builder,
+                chainstate,
+                &sortdb.index_handle(&tip.sortition_id),
+                vec![coinbase_tx.clone()],
+            )
+            .unwrap();
+
+            let mut bad_block = bad_block.0;
+            bad_block.txs.push(bad_tx.clone());
+
+            let txid_vecs = bad_block
+                .txs
+                .iter()
+                .map(|tx| tx.txid().as_bytes().to_vec())
+                .collect();
+
+            let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs);
+            bad_block.header.tx_merkle_root = merkle_tree.root();
+
+            chainstate
+                .reload_unconfirmed_state(
+                    &sortdb.index_handle(&tip.sortition_id),
+                    parent_index_hash.clone(),
+                )
+                .unwrap();
+
+            // make a bad microblock
+            let iconn = &sortdb.index_handle(&tip.sortition_id);
+            let mut microblock_builder = StacksMicroblockBuilder::new(
+                parent_header_hash.clone(),
+                parent_consensus_hash.clone(),
+                chainstate,
+                iconn,
+                BlockBuilderSettings::max_value(),
+            )
+            .unwrap();
+
+            // miner should fail with just the bad tx, since it's problematic
+            let mblock_err = microblock_builder
+                .mine_next_microblock_from_txs(vec![(bad_tx.clone(), bad_tx_len)], &mblock_privk)
+                .unwrap_err();
+            if let ChainstateError::NoTransactionsToMine = mblock_err {
+            } else {
+                panic!("Did not get NoTransactionsToMine");
+            }
+
+            let token_transfer =
+                make_user_stacks_transfer(&privk, 0, 200, &recipient.to_account_principal(), 123);
+            let tt_len = {
+                let mut bytes = vec![];
+                token_transfer.consensus_serialize(&mut bytes).unwrap();
+                bytes.len() as u64
+            };
+
+            let mut bad_mblock = microblock_builder
+                .mine_next_microblock_from_txs(
+                    vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)],
+                    &mblock_privk,
+                )
+                .unwrap();
+
+            // miner shouldn't include the bad tx, since it's problematic
+            assert_eq!(bad_mblock.txs.len(), 1);
+            bad_mblock.txs.push(bad_tx.clone());
+
+            // force it in anyway
+            let txid_vecs = bad_mblock
+                .txs
+                .iter()
+                .map(|tx| tx.txid().as_bytes().to_vec())
+                .collect();
+
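+            // As with the bad anchored block above, recompute the merkle root
+            // so the tampered microblock is *structurally* valid -- its header
+            // commits to the forced-in problematic tx -- and only the relayer's
+            // semantic checks are left to reject it.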
+            let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs);
+            bad_mblock.header.tx_merkle_root = merkle_tree.root();
+            bad_mblock.sign(&mblock_privk).unwrap();
+
+            (bad_block, vec![bad_mblock])
+        },
+    );
+
+    let bad_mblock = microblocks.pop().unwrap();
+    let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
+    peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]);
+
+    // stuff them all into each possible field of NetworkResult
+    // p2p messages
+    let nk = NeighborKey {
+        peer_version: 1,
+        network_id: 2,
+        addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]),
+        port: 19,
+    };
+    let preamble = Preamble {
+        peer_version: 1,
+        network_id: 2,
+        seq: 3,
+        burn_block_height: 4,
+        burn_block_hash: BurnchainHeaderHash([5u8; 32]),
+        burn_stable_block_height: 6,
+        burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]),
+        additional_data: 8,
+        signature: MessageSignature([9u8; 65]),
+        payload_len: 10,
+    };
+    let bad_msgs = vec![
+        StacksMessage {
+            preamble: preamble.clone(),
+            relayers: vec![],
+            payload: StacksMessageType::Blocks(BlocksData {
+                blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())],
+            }),
+        },
+        StacksMessage {
+            preamble: preamble.clone(),
+            relayers: vec![],
+            payload: StacksMessageType::Microblocks(MicroblocksData {
+                index_anchor_block: StacksBlockId::new(
+                    &new_consensus_hash,
+                    &bad_block.block_hash(),
+                ),
+                microblocks: vec![bad_mblock.clone()],
+            }),
+        },
+        StacksMessage {
+            preamble: preamble.clone(),
+            relayers: vec![],
+            payload: StacksMessageType::Transaction(bad_tx.clone()),
+        },
+    ];
+    let mut unsolicited = HashMap::new();
+    unsolicited.insert(nk.clone(), bad_msgs.clone());
+
+    let mut network_result = NetworkResult::new(
+        peer.network.stacks_tip.block_id(),
+        0,
+        0,
+        0,
+        0,
+        0,
+        ConsensusHash([0x01; 20]),
+        HashMap::new(),
+    );
+    network_result.consume_unsolicited(unsolicited);
+
+    assert!(network_result.has_blocks());
+    assert!(network_result.has_microblocks());
+    assert!(network_result.has_transactions());
+
+    network_result.consume_http_uploads(
+        bad_msgs
+            .into_iter()
+            .map(|msg| msg.payload)
+            .collect::<Vec<_>>(),
+    );
+
+    assert!(network_result.has_blocks());
+    assert!(network_result.has_microblocks());
+    assert!(network_result.has_transactions());
+
+    assert_eq!(network_result.uploaded_transactions.len(), 1);
+    assert_eq!(network_result.uploaded_blocks.len(), 1);
+    assert_eq!(network_result.uploaded_microblocks.len(), 1);
+    assert_eq!(network_result.pushed_transactions.len(), 1);
+    assert_eq!(network_result.pushed_blocks.len(), 1);
+    assert_eq!(network_result.pushed_microblocks.len(), 1);
+
+    network_result
+        .blocks
+        .push((new_consensus_hash.clone(), bad_block.clone(), 123));
+    network_result.confirmed_microblocks.push((
+        new_consensus_hash.clone(),
+        vec![bad_mblock.clone()],
+        234,
+    ));
+
+    let mut sortdb = peer.sortdb.take().unwrap();
+    let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) =
+        Relayer::process_new_blocks(
+            &mut network_result,
+            &mut sortdb,
+            &mut peer.stacks_node.as_mut().unwrap().chainstate,
+            None,
+        )
+        .unwrap();
+
+    // despite this data showing up in all aspects of the network result, none of it actually
+    // gets relayed
+    assert_eq!(processed_blocks.len(), 0);
+    assert_eq!(processed_mblocks.len(), 0);
+    assert_eq!(relay_mblocks.len(), 0);
+    assert_eq!(bad_neighbors.len(), 0);
+
+    let txs_relayed = Relayer::process_transactions(
+        &mut network_result,
+        &sortdb,
+        &mut peer.stacks_node.as_mut().unwrap().chainstate,
+        &mut
peer.mempool.as_mut().unwrap(), + None, + ) + .unwrap(); + assert_eq!(txs_relayed.len(), 0); +} + +#[test] +fn test_block_pay_to_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + peer_config.epochs = Some(epochs); + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + 0, + Some(PrincipalData::Contract( + QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") + .unwrap(), + )), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + vec![coinbase_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block is a pay-to-contract block + // Pay-to-contract should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. 
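+    // (As configured above, the first 25 burnchain blocks have no sortition
+    // and Epoch2_05's end_height is 28, so the two tenures mined in the loop
+    // below land in epoch 2.05, while the first tenure mined after the loop is
+    // the first one in epoch 2.1.)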
+ for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid pay-to-contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + + peer_config.epochs = Some(epochs); + peer_config.initial_balances = initial_balances; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + 
&snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = + make_coinbase_with_nonce(miner, parent_tip.stacks_block_height as usize, 0, None); + + let versioned_contract = make_smart_contract_with_version( + miner, + 1, + tip.block_height.try_into().unwrap(), + 0, + Some(ClarityVersion::Clarity1), + Some(1000), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + vec![coinbase_tx, versioned_contract], + ) + .unwrap(); + + eprintln!("{:?}", &anchored_block.0); + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block contains a versioned smart contract. + // Versioned smart contracts should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + eprintln!("{:?}", &stacks_block); + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid versioned smart contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + 
start_height: 0,
+            end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition
+            block_limit: ExecutionCost::max_value(),
+            network_epoch: PEER_VERSION_EPOCH_2_05,
+        },
+        StacksEpoch {
+            epoch_id: StacksEpochId::Epoch21,
+            start_height: 28,
+            end_height: STACKS_EPOCH_MAX,
+            block_limit: ExecutionCost::max_value(),
+            network_epoch: PEER_VERSION_EPOCH_2_1,
+        },
+    ];
+
+    peer_config.epochs = Some(epochs);
+    peer_config.initial_balances = initial_balances;
+    let burnchain = peer_config.burnchain.clone();
+
+    let mut peer = TestPeer::new(peer_config);
+    let versioned_contract_opt: RefCell<Option<StacksTransaction>> = RefCell::new(None);
+    let nonce: RefCell<u64> = RefCell::new(0);
+
+    let mut make_tenure =
+        |miner: &mut TestMiner,
+         sortdb: &mut SortitionDB,
+         chainstate: &mut StacksChainState,
+         vrfproof: VRFProof,
+         parent_opt: Option<&StacksBlock>,
+         microblock_parent_opt: Option<&StacksMicroblockHeader>| {
+            let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
+            let stacks_tip_opt =
+                NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap();
+            let parent_tip = match stacks_tip_opt {
+                None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(),
+                Some(header_tip) => {
+                    let ic = sortdb.index_conn();
+                    let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block(
+                        &ic,
+                        &tip.sortition_id,
+                        &header_tip.anchored_header.block_hash(),
+                    )
+                    .unwrap()
+                    .unwrap(); // succeeds because we don't fork
+                    StacksChainState::get_anchored_block_header_info(
+                        chainstate.db(),
+                        &snapshot.consensus_hash,
+                        &snapshot.winning_stacks_block_hash,
+                    )
+                    .unwrap()
+                    .unwrap()
+                }
+            };
+
+            let parent_header_hash = parent_tip.anchored_header.block_hash();
+            let parent_consensus_hash = parent_tip.consensus_hash.clone();
+            let parent_index_hash = StacksBlockHeader::make_index_block_hash(
+                &parent_consensus_hash,
+                &parent_header_hash,
+            );
+
+            let next_nonce = *nonce.borrow();
+            let coinbase_tx = make_coinbase_with_nonce(
+                miner,
+                parent_tip.stacks_block_height as usize,
+                next_nonce,
+                None,
+            );
+
+            let versioned_contract = make_smart_contract_with_version(
+                miner,
+                next_nonce + 1,
+                tip.block_height.try_into().unwrap(),
+                0,
+                Some(ClarityVersion::Clarity1),
+                Some(1000),
+            );
+
+            *versioned_contract_opt.borrow_mut() = Some(versioned_contract);
+            *nonce.borrow_mut() = next_nonce + 1;
+
+            let mut mblock_pubkey_hash_bytes = [0u8; 20];
+            mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]);
+
+            let builder = StacksBlockBuilder::make_block_builder(
+                &burnchain,
+                chainstate.mainnet,
+                &parent_tip,
+                vrfproof,
+                tip.total_burn,
+                Hash160(mblock_pubkey_hash_bytes),
+            )
+            .unwrap();
+
+            let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs(
+                builder,
+                chainstate,
+                &sortdb.index_handle(&tip.sortition_id),
+                vec![coinbase_tx],
+            )
+            .unwrap();
+
+            eprintln!("{:?}", &anchored_block.0);
+            (anchored_block.0, vec![])
+        };
+
+    for i in 0..2 {
+        let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure);
+        let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
+
+        let sortdb = peer.sortdb.take().unwrap();
+        let mut node = peer.stacks_node.take().unwrap();
+
+        // the empty block should be accepted
+        match Relayer::process_new_anchored_block(
+            &sortdb.index_conn(),
+            &mut node.chainstate,
+            &consensus_hash,
+            &stacks_block,
+            123,
+        ) {
+            Ok(x) => {
+                assert!(x, "Did not accept valid block");
+            }
+            Err(e) => {
+                panic!("Got unexpected error {:?}", &e);
+            }
+        };
+
+        // process it
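+        // (the chains coordinator applies the newly-stored anchored block to
+        // the chainstate, so the mempool admission check below runs against
+        // the new tip)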
+        peer.coord.handle_new_stacks_block().unwrap();
+
+        // the mempool would reject a versioned contract transaction, since we're not yet at
+        // tenure 28
+        let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap();
+        let versioned_contract_len = versioned_contract.serialize_to_vec().len();
+        let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+        match node.chainstate.will_admit_mempool_tx(
+            &sortdb.index_handle(&tip.sortition_id),
+            &consensus_hash,
+            &stacks_block.block_hash(),
+            &versioned_contract,
+            versioned_contract_len as u64,
+        ) {
+            Err(MemPoolRejection::Other(msg)) => {
+                assert!(msg.find("not supported in this epoch").is_some());
+            }
+            Err(e) => {
+                panic!("will_admit_mempool_tx {:?}", &e);
+            }
+            Ok(_) => {
+                panic!("will_admit_mempool_tx succeeded");
+            }
+        };
+
+        peer.sortdb = Some(sortdb);
+        peer.stacks_node = Some(node);
+    }
+
+    // *now* it should succeed, since tenure 28 was in epoch 2.1
+    let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure);
+    let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
+
+    let sortdb = peer.sortdb.take().unwrap();
+    let mut node = peer.stacks_node.take().unwrap();
+
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    match Relayer::process_new_anchored_block(
+        &sortdb.index_conn(),
+        &mut node.chainstate,
+        &consensus_hash,
+        &stacks_block,
+        123,
+    ) {
+        Ok(x) => {
+            assert!(x, "Failed to process valid versioned smart contract block");
+        }
+        Err(e) => {
+            panic!("Got unexpected error {:?}", &e);
+        }
+    };
+
+    // process it
+    peer.coord.handle_new_stacks_block().unwrap();
+
+    // the mempool should now accept a versioned contract transaction, since tenure 28 is in
+    // epoch 2.1
+    let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap();
+    let versioned_contract_len = versioned_contract.serialize_to_vec().len();
+    match node.chainstate.will_admit_mempool_tx(
+        &sortdb.index_handle(&tip.sortition_id),
+        &consensus_hash,
+        &stacks_block.block_hash(),
+        &versioned_contract,
+        versioned_contract_len as u64,
+    ) {
+        Err(e) => {
+            panic!("will_admit_mempool_tx {:?}", &e);
+        }
+        Ok(_) => {}
+    };
+
+    peer.sortdb = Some(sortdb);
+    peer.stacks_node = Some(node);
+}
+
+// TODO: process bans
+// TODO: test sending invalid blocks-available and microblocks-available (should result in a ban)
+// TODO: test sending invalid transactions (should result in a ban)
+// TODO: test bandwidth limits (sending too much should result in a nack, and then a ban)
diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs
new file mode 100644
index 00000000000..c408e9ee60f
--- /dev/null
+++ b/stackslib/src/net/tests/relay/mod.rs
@@ -0,0 +1,17 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ +pub mod epoch2x; +pub mod nakamoto; diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs new file mode 100644 index 00000000000..4df31714741 --- /dev/null +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -0,0 +1,1137 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{HashMap, VecDeque}; +use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError}; +use std::thread; +use std::thread::JoinHandle; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::tests::TestMiner; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::stacks::boot::test::{ + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, +}; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::{ + codec_all_transactions, make_codec_test_block, make_codec_test_microblock, +}; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, TestStacksNode, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::relay::{AcceptedNakamotoBlocks, ProcessedNetReceipts, Relayer}; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::tests::relay::epoch2x::broadcast_message; +use crate::net::{Error as NetError, *}; +use 
crate::util_lib::test::*; + +/// Everything in a TestPeer, except the coordinator (which is encumbered by the lifetime of its +/// event observer) +struct ExitedPeer { + pub config: TestPeerConfig, + pub network: PeerNetwork, + pub sortdb: Option<SortitionDB>, + pub miner: TestMiner, + pub stacks_node: Option<TestStacksNode>, + pub relayer: Relayer, + pub mempool: Option<MemPoolDB>, + pub chainstate_path: String, + pub indexer: Option<BitcoinIndexer>, +} + +impl ExitedPeer { + /// Instantiate the exited peer from the TestPeer + fn from_test_peer(peer: TestPeer) -> Self { + Self { + config: peer.config, + network: peer.network, + sortdb: peer.sortdb, + miner: peer.miner, + stacks_node: peer.stacks_node, + relayer: peer.relayer, + mempool: peer.mempool, + chainstate_path: peer.chainstate_path, + indexer: peer.indexer, + } + } + + /// Run the exited peer's network stack; no further block processing will take place. + pub fn run_with_ibd( + &mut self, + ibd: bool, + dns_client: Option<&mut DNSClient>, + ) -> Result<(NetworkResult, ProcessedNetReceipts), NetError> { + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let mut mempool = self.mempool.take().unwrap(); + let indexer = self.indexer.take().unwrap(); + + let net_result = self.network.run( + &indexer, + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + dns_client, + false, + ibd, + 100, + &RPCHandlerArgs::default(), + )?; + let receipts_res = self.relayer.process_network_result( + self.network.get_local_peer(), + &mut net_result.clone(), + &self.network.burnchain, + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + ibd, + None, + None, + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + self.mempool = Some(mempool); + self.indexer = Some(indexer); + + receipts_res.and_then(|receipts| Ok((net_result, receipts))) + } +} + +/// Messages passed to the unit test from the seed node thread +enum SeedData { + BurnOps(Vec<BlockstackOperationType>, ConsensusHash), + Blocks(Vec<NakamotoBlock>), + Exit(ExitedPeer), +} + +/// Messages passed from the unit test to the seed node thread +#[derive(Clone, Debug, PartialEq)] +enum SeedCommand { + Exit, +} + +/// Communication channels from the unit test to the seed node thread +struct FollowerComms { + data_receiver: Receiver<SeedData>, + command_sender: SyncSender<SeedCommand>, +} + +impl FollowerComms { + pub fn send_exit(&mut self) { + self.command_sender + .send(SeedCommand::Exit) + .expect("FATAL: seed node hangup"); + } + + pub fn try_recv(&mut self) -> Option<SeedData> { + match self.data_receiver.try_recv() { + Ok(data) => Some(data), + Err(TryRecvError::Empty) => None, + Err(_) => { + panic!("FATAL: seed node hangup"); + } + } + } +} + +/// Communication channels from the seed node thread to the unit test +struct SeedComms { + data_sender: SyncSender<SeedData>, + command_receiver: Receiver<SeedCommand>, +} + +struct SeedNode {} + +impl SeedNode { + /// Have `peer` produce two reward cycles of length `rc_len`, and forward all sortitions and + /// Nakamoto blocks back to the unit test. This consumes `peer`. + /// + /// The `peer` will process its blocks locally, and _push_ them to one or more followers. The + /// `peer` will wait for there to be at least one network conversation open before advancing, + /// thereby ensuring reliable delivery of the Nakamoto blocks to at least one follower. In + /// addition, the blocks and sortitions will be sent to the unit test via `comms`.
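+ /// + /// Each tenure mines a random number of Nakamoto blocks (up to 10), each of which carries a + /// single STX transfer to a fixed recipient.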
+ /// + /// The contents of `peer` will be sent back to the unit test via an `ExitedPeer` struct, so + /// the unit test can query it or even run its networking stack. + pub fn main(mut peer: TestPeer, rc_len: u64, comms: SeedComms) { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let mut test_signers = peer.config.test_signers.take().unwrap(); + let test_stackers = peer.config.test_stackers.take().unwrap(); + + let mut all_blocks: Vec<NakamotoBlock> = vec![]; + let mut all_burn_ops = vec![]; + let mut rc_blocks = vec![]; + let mut rc_burn_ops = vec![]; + + // have the peer mine some blocks for two reward cycles + for i in 0..(2 * rc_len) { + debug!("Tenure {}", i); + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + // pass along to the follower + if comms + .data_sender + .send(SeedData::BurnOps(burn_ops.clone(), consensus_hash.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + let num_blocks: usize = (thread_rng().gen::<usize>() % 10) + 1; + + let block_height = peer.get_burn_block_height(); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + let mut txs = vec![]; + if blocks_so_far.len() < num_blocks { + debug!("\n\nProduce block {}\n\n", all_blocks.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + txs.push(stx_transfer); + } + txs + }, + ); + + let mut blocks: Vec<NakamotoBlock> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // run network state machine until we have a connection + loop { + let network_result_res = peer.run_with_ibd(false, None); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + // relay these blocks + let local_peer = peer.network.get_local_peer().clone(); + let sortdb = peer.sortdb.take().unwrap(); + let stacks_node = peer.stacks_node.take().unwrap(); + + peer.relayer.relay_epoch3_blocks( + &local_peer, + &sortdb, + &stacks_node.chainstate, + vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: blocks.clone(), + }], + true, + ); + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(stacks_node); + + // send the blocks to the unit test as well + if comms + .data_sender + .send(SeedData::Blocks(blocks.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + // if we're starting a new reward cycle, then save the current one + let tip = { + let sort_db =
peer.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + if peer + .config + .burnchain + .is_reward_cycle_start(tip.block_height) + { + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + } + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + peer.config.test_signers = Some(test_signers); + peer.config.test_stackers = Some(test_stackers); + + let exited_peer = ExitedPeer::from_test_peer(peer); + + // inform the follower that we're done, and pass along the final state of the peer + if comms.data_sender.send(SeedData::Exit(exited_peer)).is_err() { + panic!("Follower disconnected"); + } + + // wait for request to exit + let Ok(SeedCommand::Exit) = comms.command_receiver.recv() else { + panic!("FATAL: did not receive shutdown request (follower must have crashed)"); + }; + } + + /// Instantiate bidirectional communication channels between the unit test and seed node + pub fn comms() -> (SeedComms, FollowerComms) { + let (data_sender, data_receiver) = sync_channel(1024); + let (command_sender, command_receiver) = sync_channel(1024); + + let seed_comms = SeedComms { + data_sender, + command_receiver, + }; + + let follower_comms = FollowerComms { + data_receiver, + command_sender, + }; + + (seed_comms, follower_comms) + } +} + +/// Test buffering limits +#[test] +fn test_buffer_data_message() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let (mut peer, _followers) = + make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 5, bitvecs.clone(), 1); + + let nakamoto_block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: ConsensusHash([0x55; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x07; 32]), + timestamp: 8, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }, + txs: vec![], + }; + + let blocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::BlocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let microblocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::MicroblocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let block = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + make_codec_test_block(10, StacksEpochId::Epoch25), + )], + }), + ); + let microblocks = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockId([0x55; 32]), + microblocks: vec![make_codec_test_microblock(10)], + }), + ); + let nakamoto_block = StacksMessage::new( + 
1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![nakamoto_block], + }), + ); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks_available { + assert!(peer + .network + .buffer_data_message(0, blocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, blocks_available.clone())); + + for _ in 0..peer + .network + .connection_opts + .max_buffered_microblocks_available + { + assert!(peer + .network + .buffer_data_message(0, microblocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, microblocks_available.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks { + assert!(peer.network.buffer_data_message(0, block.clone())); + } + assert!(!peer.network.buffer_data_message(0, block.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_microblocks { + assert!(peer.network.buffer_data_message(0, microblocks.clone())); + } + assert!(!peer.network.buffer_data_message(0, microblocks.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_nakamoto_blocks { + assert!(peer.network.buffer_data_message(0, nakamoto_block.clone())); + } + assert!(!peer.network.buffer_data_message(0, nakamoto_block.clone())); +} + +/// Verify that Nakamoto blocks whose sortitions are known will *not* be buffered, but instead +/// forwarded to the relayer for processing. +#[test] +fn test_no_buffer_ready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower got {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_consensus_hash, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // no need to buffer this because we can process it right away +
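// (the handler returns true only if the message still needs to be buffered) +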
let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we need these blocks, but we don't need to buffer them + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + + // suppose these blocks were invalid -- they would not be bufferable. + // bad signature? not bufferable + let mut bad_block = block.clone(); + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &bad_block.header.consensus_hash, + ) + .unwrap() + .unwrap(); + bad_block + .header + .signer_signature + .push(bad_block.header.signer_signature.last().cloned().unwrap()); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(block_sn.block_height) + .unwrap() + ), + true + ) + ); + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + &bad_block + )); + + // unrecognized consensus hash + let mut bad_block = block.clone(); + bad_block.header.consensus_hash = ConsensusHash([0xde; 20]); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle( + follower.network.burnchain_tip.block_height + ) + .unwrap() + ), + false + ) + ); + + // stale consensus hash + let mut bad_block = block.clone(); + let ancestor_sn = SortitionDB::get_ancestor_snapshot( + &sortdb.index_conn(), + 1, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + bad_block.header.consensus_hash = ancestor_sn.consensus_hash; + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(ancestor_sn.block_height) + .unwrap() + ), + true + ) + ); + } + + // go process the blocks _as if_ they came from a network result + let mut unsolicited = HashMap::new(); + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + unsolicited.insert(peer_nk.clone(), vec![msg]); + + if let Some(mut network_result) = network_result.take() { + network_result.consume_unsolicited(unsolicited); + let num_processed = follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &follower.network.burnchain, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + + // because we process in order, they should all get processed + assert_eq!(num_processed, blocks.len() as u64); + } + + // no need to buffer if we already have the block + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we don't need these blocks anymore + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + seed_exited = true; + exited_peer = Some(exited); + 
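// unblock the seed thread, which waits for SeedCommand::Exit before returning +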
follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Verify that Nakamoto blocks whose sortitions are not yet known will be buffered, and sent to +/// the relayer once the burnchain advances. +#[test] +fn test_buffer_nonready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + // don't authenticate unsolicited messages, since this test directly pushes them + follower + .network + .connection_opts + .test_disable_unsolicited_message_authentication = true; + follower + .config + .connection_opts + .test_disable_unsolicited_message_authentication = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + let mut buffered_burn_ops = VecDeque::new(); + let mut all_blocks = vec![]; + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!( + "Follower got and will buffer {}: {:?}", + &consensus_hash, &burn_ops + ); + buffered_burn_ops.push_back((burn_ops, consensus_hash)); + if buffered_burn_ops.len() > 1 { + let (buffered_burn_ops, buffered_consensus_hash) = + buffered_burn_ops.pop_front().unwrap(); + debug!( + "Follower 
will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + all_blocks.push(blocks.clone()); + + let sortdb = follower.sortdb.take().unwrap(); + let node = follower.stacks_node.take().unwrap(); + + // we will need to buffer this since the sortition for these blocks hasn't been + // processed yet + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(buffer); + + // we need these blocks, but we can't process them yet + for block in blocks.iter() { + assert!(follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + // pass this and other blocks to the p2p network's unsolicited message handler, + // so they can be buffered up and processed. + let mut unsolicited_msgs: HashMap> = HashMap::new(); + for (event_id, convo) in follower.network.peers.iter() { + for blks in all_blocks.iter() { + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blks.clone(), + }), + ); + + if let Some(msgs) = unsolicited_msgs.get_mut(event_id) { + msgs.push(msg); + } else { + unsolicited_msgs.insert(*event_id, vec![msg]); + } + } + } + + follower.network.handle_unsolicited_messages( + &sortdb, + &node.chainstate, + unsolicited_msgs, + true, + true, + ); + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + // process the last burnchain sortitions + while let Some((buffered_burn_ops, buffered_consensus_hash)) = + buffered_burn_ops.pop_front() + { + debug!( + "Follower will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + + // process the last buffered messages + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + if let Some(mut network_result) = network_result.take() { + follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &follower.network.burnchain, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + + network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + if let Some(mut network_result) = network_result.take() { + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + let num_processed = follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &follower.network.burnchain, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + info!("Processed {} unsolicited Nakamoto blocks", num_processed); + follower.stacks_node = 
Some(node); + follower.sortdb = Some(sortdb); + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!( + exited_peer_burn_tip.sortition_id, + follower_burn_tip.sortition_id + ); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Boot a follower off of a seed node by having the seed node push its blocks to the follower via +/// the p2p stack. The follower will buffer up Nakamoto blocks and forward them to its relayer as +/// needed. +#[test] +fn test_nakamoto_boot_node_from_block_push() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full reward cycle + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + // follower will forward pushed data to its relayer + loop { + let network_result_res = + follower.run_with_ibd(true, Some(&mut follower_dns_client)); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower will process {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_ch) = follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_ch, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + seed_exited = true; + exited_peer = Some(exited); + 
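// let the seed node thread shut down cleanly +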
follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // recover exited peer and get its chain tips + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + let mut synced = false; + for i in 0..100 { + // let the follower catch up to and keep talking to the exited peer + exited_peer.run_with_ibd(false, None).unwrap(); + follower + .run_with_ibd(true, Some(&mut follower_dns_client)) + .unwrap(); + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + debug!("{}: Follower sortition tip: {:?}", i, &follower_burn_tip); + debug!("{}: Seed sortition tip: {:?}", i, &exited_peer_burn_tip); + debug!("{}: Follower stacks tip: {:?}", i, &follower_stacks_tip); + debug!("{}: Seed stacks tip: {:?}", i, &exited_peer_stacks_tip); + + if exited_peer_burn_tip.consensus_hash == follower_burn_tip.consensus_hash + && exited_peer_stacks_tip == follower_stacks_tip + { + synced = true; + break; + } + } + + assert!(synced); + }); +} diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs new file mode 100644 index 00000000000..5aeadc3dfd5 --- /dev/null +++ b/stackslib/src/net/unsolicited.rs @@ -0,0 +1,1114 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::HashMap; + +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainstateError, StacksBlockHeader}; +use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState}; +use crate::net::{ + BlocksAvailableData, BlocksData, BlocksDatum, Error as NetError, MicroblocksData, + NakamotoBlocksData, NeighborKey, Preamble, StacksMessage, StacksMessageType, +}; + +/// This module contains all of the code needed to handle unsolicited messages -- that is, messages +/// that get pushed to us. These include: +/// +/// * BlocksAvailable (epoch 2.x) +/// * MicroblocksAvailable (epoch 2.x) +/// * BlocksData (epoch 2.x) +/// * NakamotoBlocksData (epoch 3.x) +/// +/// Normally, the PeerNetwork will attempt to validate each message and pass it to the Relayer via +/// a NetworkResult. However, some kinds of messages (such as these) cannot always be +/// validated, because validation depends on chainstate data that is not yet available. For +/// example, if this node is behind the burnchain chain tip, it will be unable to verify blocks +/// pushed to it for sortitions that have yet to be processed locally. +/// +/// In the event that a message cannot be validated, the PeerNetwork will instead store these +/// messages internally (in `self.pending_messages`), and try to validate them again once the +/// burnchain view changes. +/// +/// Transactions are not considered here, but are handled separately with the mempool +/// synchronization state machine. + +impl PeerNetwork { + #[cfg_attr(test, mutants::skip)] + /// Check that the sender is authenticated. + /// Returns Some(remote sender address) if so + /// Returns None otherwise + fn check_peer_authenticated(&self, event_id: usize) -> Option<NeighborKey> { + let Some((remote_neighbor_key, remote_is_authenticated)) = self + .peers + .get(&event_id) + .map(|convo| (convo.to_neighbor_key(), convo.is_authenticated())) + else { + test_debug!( + "{:?}: No such neighbor event={}", + &self.local_peer, + event_id + ); + return None; + }; + + if !remote_is_authenticated { + // drop -- a correct peer will have authenticated before sending this message + test_debug!( + "{:?}: Unauthenticated neighbor {:?}", + &self.local_peer, + &remote_neighbor_key + ); + return None; + } + Some(remote_neighbor_key) + } + + /// Update a peer's inventory state to indicate that the given block is available. + /// If updated, return the sortition height of the bit in the inv that was set.
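+ /// Returns Err(NetError::NotFoundError) if the consensus hash is not yet recognized, which + /// callers treat as a cue to buffer the message until the next burnchain view update.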
+ /// Only valid for epoch 2.x + fn handle_unsolicited_inv_update_epoch2x( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + outbound_neighbor_key: &NeighborKey, + consensus_hash: &ConsensusHash, + microblocks: bool, + ) -> Result, NetError> { + let Some(inv) = self.inv_state.as_mut() else { + return Ok(None); + }; + + let res = if microblocks { + inv.set_microblocks_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + } else { + inv.set_block_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + }; + + let block_sortition_height = match res { + Ok(Some(block_height)) => block_height, + Ok(None) => { + debug!( + "{:?}: We already know the inventory state in {} for {}", + &self.local_peer, outbound_neighbor_key, consensus_hash + ); + return Ok(None); + } + Err(NetError::NotFoundError) => { + // is this remote node simply ahead of us? + if let Some(convo) = self.peers.get(&event_id) { + if self.chain_view.burn_block_height < convo.burnchain_tip_height { + debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Err(NetError::NotFoundError); + } + } + // not ahead of us -- it's a bad consensus hash + debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Ok(None); + } + Err(NetError::InvalidMessage) => { + // punish this peer + info!( + "Peer {:?} sent an invalid update for {}", + &outbound_neighbor_key, + if microblocks { + "streamed microblocks" + } else { + "blocks" + } + ); + self.bans.insert(event_id); + + if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { + self.bans.insert(*outbound_event_id); + } + return Ok(None); + } + Err(e) => { + warn!( + "Failed to update inv state for {:?}: {:?}", + &outbound_neighbor_key, &e + ); + return Ok(None); + } + }; + Ok(Some(block_sortition_height)) + } + + #[cfg_attr(test, mutants::skip)] + /// Determine whether or not the system can buffer up this message, based on site-local + /// configuration options. + /// Return true if so, false if not + pub(crate) fn can_buffer_data_message( + &self, + event_id: usize, + msgs: &[StacksMessage], + msg: &StacksMessage, + ) -> bool { + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. 
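+ // Tally the messages already buffered for this event by type; the new message is admitted + // only while its own type is still under its configured cap.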
+ let mut blocks_available = 0; + let mut microblocks_available = 0; + let mut blocks_data = 0; + let mut microblocks_data = 0; + let mut nakamoto_blocks_data = 0; + for stored_msg in msgs.iter() { + match &stored_msg.payload { + StacksMessageType::BlocksAvailable(_) => { + blocks_available += 1; + if matches!(&msg.payload, StacksMessageType::BlocksAvailable(..)) + && blocks_available >= self.connection_opts.max_buffered_blocks_available + { + debug!( + "{:?}: Cannot buffer BlocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_available + ); + return false; + } + } + StacksMessageType::MicroblocksAvailable(_) => { + microblocks_available += 1; + if matches!(&msg.payload, StacksMessageType::MicroblocksAvailable(..)) + && microblocks_available + >= self.connection_opts.max_buffered_microblocks_available + { + debug!( + "{:?}: Cannot buffer MicroblocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_available + ); + return false; + } + } + StacksMessageType::Blocks(_) => { + blocks_data += 1; + if matches!(&msg.payload, StacksMessageType::Blocks(..)) + && blocks_data >= self.connection_opts.max_buffered_blocks + { + debug!( + "{:?}: Cannot buffer BlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_data + ); + return false; + } + } + StacksMessageType::Microblocks(_) => { + microblocks_data += 1; + if matches!(&msg.payload, StacksMessageType::Microblocks(..)) + && microblocks_data >= self.connection_opts.max_buffered_microblocks + { + debug!( + "{:?}: Cannot buffer MicroblocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_data + ); + return false; + } + } + StacksMessageType::NakamotoBlocks(_) => { + nakamoto_blocks_data += 1; + if matches!(&msg.payload, StacksMessageType::NakamotoBlocks(..)) + && nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks + { + debug!( + "{:?}: Cannot buffer NakamotoBlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, nakamoto_blocks_data + ); + return false; + } + } + _ => {} + } + } + + true + } + + #[cfg_attr(test, mutants::skip)] + /// Buffer a message for re-processing once the burnchain view updates. + /// If there is no space for the message, then silently drop it. + /// Returns true if buffered. + /// Returns false if not. + pub(crate) fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> bool { + let Some(msgs) = self.pending_messages.get(&event_id) else { + self.pending_messages.insert(event_id, vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.local_peer, event_id + ); + return true; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, msgs, &msg) { + return false; + } + + if let Some(msgs) = self.pending_messages.get_mut(&event_id) { + // should always be reachable + msgs.push(msg); + debug!( + "{:?}: Event {} has {} messages buffered", + &self.local_peer, + event_id, + msgs.len() + ); + } + true + } + + /// Do we need a block or microblock stream, given its sortition's consensus hash? + fn need_block_or_microblock_stream( + sortdb: &SortitionDB, + chainstate: &StacksChainState, + consensus_hash: &ConsensusHash, + is_microblock: bool, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? 
+ .ok_or(ChainstateError::NoSuchBlockError)?; + let block_hash_opt = if sn.sortition { + Some(sn.winning_stacks_block_hash) + } else { + None + }; + + let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; + if is_microblock { + // checking for microblock absence + Ok(inv.microblocks_bitvec[0] == 0) + } else { + // checking for block absence + Ok(inv.block_bitvec[0] == 0) + } + } + + /// Handle unsolicited BlocksAvailable. If it is valid, and it represents a block that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the block, + /// so the downloader can eventually request the block regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts BlocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the blocks' validity will be checked against the + /// sortition DB, and if they correspond to real sortitions, then the remote peer's inventory + /// will be updated and the local peer's downloader will be alerted to this block. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + fn handle_unsolicited_BlocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_blocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + // we only accept BlocksAvailable from outbound peers, since we only crawl invs from + // outbound peers. + return false; + }; + + debug!( + "{:?}: Process BlocksAvailable from {:?} with {} entries", + &self.local_peer, + &outbound_neighbor_key, + new_blocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_blocks.available.iter() { + let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + false, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_block = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + false, + ) { + Ok(x) => x, + Err(e) => { + warn!( + "Failed to determine if we need block for consensus hash {}: {:?}", + &consensus_hash, &e + ); + false + } + }; + + debug!( + "Need block {}/{}? 
{}", + &consensus_hash, &block_hash, need_block + ); + + if need_block { + // have the downloader request this block if it's new and we don't have it + match self.block_downloader { + Some(ref mut downloader) => { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + } + self.have_data_to_download = true; + } + None => {} + } + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksAvailable. If it is valid, and it represents a microblock stream that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the stream, + /// so the downloader can eventually request the stream regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts MicroblocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the remote peer's inventory will be updated and + /// the local peer's downloader will be alerted to the presence of these microblocks. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + /// + /// Return whether or not we need to buffer this message for subsequent consideration. + fn handle_unsolicited_MicroblocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_mblocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + return false; + }; + + debug!( + "{:?}: Process MicroblocksAvailable from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key, + new_mblocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_mblocks.available.iter() { + let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + true, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + true, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); + false + } + }; + + debug!( + "Need microblock stream {}/{}? 
{}", + &consensus_hash, &block_hash, need_microblock_stream + ); + + if need_microblock_stream { + // have the downloader request this microblock stream if it's new to us + if let Some(downloader) = self.block_downloader.as_mut() { + downloader.hint_microblock_sortition_height_available( + mblock_sortition_height, + ibd, + need_microblock_stream, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + } + self.have_data_to_download = true; + } + } + } + to_buffer + } + + /// Handle unsolicited BlocksData. + /// + /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have + /// an outbound connection to that peer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the block will be checked against the local + /// sortition DB, and if it corresponds to a sortition, the remote peer's inventory will be + /// updated to reflect that it has it. + /// + /// Returns true if we have to buffer this message; false if not. + fn handle_unsolicited_BlocksData( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + new_blocks: &BlocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process BlocksData from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt + .clone() + .or_else(|| { self.check_peer_authenticated(event_id) }), + new_blocks.blocks.len() + ); + + let mut to_buffer = false; + + for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { + let sn = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &consensus_hash, + ) { + Ok(Some(sn)) => sn, + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + to_buffer = true; + } else { + debug!( + "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + &self.local_peer, consensus_hash, &e + ); + continue; + } + }; + + if !sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + &self.local_peer, consensus_hash + ); + continue; + } + + if sn.winning_stacks_block_hash != block.block_hash() { + info!( + "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", + &self.local_peer, + block.block_hash(), + sn.winning_stacks_block_hash, + sn.sortition + ); + continue; + } + + // only bother updating the inventory for this event's peer if we have an outbound + // connection to it. 
+ if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { + let _ = self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + &sn.consensus_hash, + false, + ); + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksData. + /// + /// Don't (yet) validate the data; just verify that it connects to two existing StacksBlocks, + /// and if so, keep it to be passed on to the relayer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the microblocks will be checked against the local + /// sortition DB and chainstate DB, and if they correspond to a missing stream between two known + /// StacksBlocks, the remote peer's inventory will be updated to reflect that it has this + /// stream. + /// + /// Returns whether or not to buffer. If the microblocks correspond to existing chain state, + /// then this method will indicate to the opposite of `buffer`, which ensures that the messages + /// will never be buffered but instead processed immediately. Otherwise, no buffering will + /// take place. + fn handle_unsolicited_MicroblocksData( + &mut self, + chainstate: &StacksChainState, + event_id: usize, + new_microblocks: &MicroblocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process MicroblocksData from {:?} for {} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt.or_else(|| { self.check_peer_authenticated(event_id) }), + &new_microblocks.index_anchor_block, + new_microblocks.microblocks.len() + ); + + // do we have the associated anchored block? + match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { + Ok(Some(_)) => { + // yup; can process now + debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); + !buffer + } + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + true + } else { + debug!( + "{:?}: Will not buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + false + } + } + Err(e) => { + warn!( + "{:?}: Failed to get header hashes for {:?}: {:?}", + &self.local_peer, &new_microblocks.index_anchor_block, &e + ); + false + } + } + } + + #[cfg_attr(test, mutants::skip)] + /// Check the signature of a NakamotoBlock against its sortition's reward cycle. + /// The reward cycle must be recent. 
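+ /// Returns true if the block's signer signatures verify against the cached reward set for + /// `reward_cycle`; returns false (and logs the reason) if no reward set is cached or if + /// verification fails.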
+ pub(crate) fn check_nakamoto_block_signer_signature( + &mut self, + reward_cycle: u64, + nakamoto_block: &NakamotoBlock, + ) -> bool { + let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { + info!( + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &nakamoto_block.header.block_hash() + ); + return false; + }; + let Some(reward_set) = rc_data.reward_set() else { + info!( + "{:?}: No reward set for reward cycle {}", + self.get_local_peer(), + reward_cycle + ); + return false; + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "{:?}: signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e + ); + return false; + } + true + } + + #[cfg_attr(test, mutants::skip)] + /// Find the reward cycle in which to validate the signature for this block. + /// This may not actually correspond to the sortition for this block's tenure -- for example, + /// it may be for a block whose sortition is about to be processed. As such, return both the + /// reward cycle, and whether or not it corresponds to the sortition. + pub(crate) fn find_nakamoto_block_reward_cycle( + &self, + sortdb: &SortitionDB, + nakamoto_block: &NakamotoBlock, + ) -> (Option<u64>, bool) { + let (reward_set_sn, can_process) = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &nakamoto_block.header.consensus_hash, + ) { + Ok(Some(sn)) => (sn, true), + Ok(None) => { + debug!( + "No sortition {} for block {}", + &nakamoto_block.header.consensus_hash, + &nakamoto_block.block_id() + ); + // we don't have the sortition for this, so we can't process it yet (i.e. we need + // to buffer) + // load the tip so we can load the current reward set data + (self.burnchain_tip.clone(), false) + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &e + ); + return (None, false); + } + }; + + if !reward_set_sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash + ); + return (None, false); + } + + let reward_set_sn_rc = self + .burnchain + .pox_reward_cycle(reward_set_sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + + return (Some(reward_set_sn_rc), can_process); + } + + #[cfg_attr(test, mutants::skip)] + /// Determine if an unsolicited NakamotoBlocksData message contains data we can potentially + /// buffer. Returns whether or not the block can be buffered.
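+ /// A block is bufferable only if it is not already stored locally, its signer signatures + /// verify against a recent reward cycle, and its sortition has not yet been processed.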
+ pub(crate) fn is_nakamoto_block_bufferable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_block: &NakamotoBlock, + ) -> bool { + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&nakamoto_block.block_id()) + .unwrap_or(false) + { + debug!( + "{:?}: Already have Nakamoto block {}", + &self.local_peer, + &nakamoto_block.block_id() + ); + return false; + } + + let (sn_rc_opt, can_process) = + self.find_nakamoto_block_reward_cycle(sortdb, nakamoto_block); + let Some(sn_rc) = sn_rc_opt else { + return false; + }; + + if !self.check_nakamoto_block_signer_signature(sn_rc, nakamoto_block) { + return false; + } + + // the block is well-formed, but we'd buffer if we can't process it yet + !can_process + } + + #[cfg_attr(test, mutants::skip)] + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). + /// + /// Returns true if this message should be buffered and re-processed + pub(crate) fn inner_handle_unsolicited_NakamotoBlocksData( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + remote_neighbor_key_opt: Option<NeighborKey>, + nakamoto_blocks: &NakamotoBlocksData, + ) -> bool { + debug!( + "{:?}: Process NakamotoBlocksData from {:?} with {} entries", + &self.local_peer, + &remote_neighbor_key_opt, + nakamoto_blocks.blocks.len() + ); + + let mut to_buffer = false; + for nakamoto_block in nakamoto_blocks.blocks.iter() { + if self.is_nakamoto_block_bufferable(sortdb, chainstate, nakamoto_block) { + debug!( + "{:?}: Will buffer unsolicited NakamotoBlocksData({}) ({})", + &self.local_peer, + &nakamoto_block.block_id(), + &nakamoto_block.header.consensus_hash, + ); + to_buffer = true; + }; + } + to_buffer + } + + #[cfg_attr(test, mutants::skip)] + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). + /// + /// Returns true if this message should be buffered and re-processed + /// + /// Wraps inner_handle_unsolicited_NakamotoBlocksData by resolving the event_id to the optional + /// neighbor key.
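
Condensed, the bufferability check above is a short-circuiting pipeline. A minimal sketch with hypothetical inputs (the real method derives them from the chainstate, the sortition DB, and the reward-set cache):

    // A block is buffered only if it is new, a usable reward cycle was found,
    // the signer signature verifies, and its sortition is not yet processed.
    fn bufferable(
        already_stored: bool,
        reward_cycle: Option<u64>,
        signature_ok: bool,
        can_process: bool,
    ) -> bool {
        if already_stored {
            return false; // duplicate; nothing to do
        }
        let Some(_rc) = reward_cycle else {
            return false; // no reward cycle to validate against
        };
        if !signature_ok {
            return false; // reject outright
        }
        !can_process // well-formed, but must wait for its sortition
    }

    fn main() {
        assert!(bufferable(false, Some(7), true, false)); // buffer and retry
        assert!(!bufferable(false, Some(7), true, true)); // process immediately
    }
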
+ fn handle_unsolicited_NakamotoBlocksData( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + nakamoto_blocks: &NakamotoBlocksData, + ) -> bool { + let outbound_neighbor_key_opt = self + .find_outbound_neighbor(event_id) + .or_else(|| self.check_peer_authenticated(event_id)); + self.inner_handle_unsolicited_NakamotoBlocksData( + sortdb, + chainstate, + outbound_neighbor_key_opt, + nakamoto_blocks, + ) + } + + #[cfg_attr(test, mutants::skip)] + /// Handle an unsolicited message, with either the intention of just processing it (in which + /// case, `buffer` will be `false`), or with the intention of not only processing it, but also + /// determining if it can be buffered and retried later (in which case, `buffer` will be + /// `true`). + /// + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid + /// later. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. + fn handle_unsolicited_message( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + preamble: &Preamble, + payload: &StacksMessageType, + ibd: bool, + buffer: bool, + ) -> (bool, bool) { + match payload { + // Update our inv state for this peer, but only do so if we have an + // outbound connection to it and it's authenticated (we don't synchronize inv + // state with inbound peers). Since we will have received this message + // from an _inbound_ conversation, we need to find the reciprocal _outbound_ + // conversation and use _that_ conversation's neighbor key to identify + // which inventory we need to update. + StacksMessageType::BlocksAvailable(ref new_blocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_BlocksAvailable( + sortdb, chainstate, event_id, new_blocks, ibd, buffer, + ); + (to_buffer, false) + } + StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_MicroblocksAvailable( + sortdb, + chainstate, + event_id, + new_mblocks, + ibd, + buffer, + ); + (to_buffer, false) + } + StacksMessageType::Blocks(ref new_blocks) => { + // update inv state for this peer, and always forward to the relayer + let to_buffer = + self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); + + // forward to relayer for processing + (to_buffer, true) + } + StacksMessageType::Microblocks(ref new_mblocks) => { + // update inv state for this peer, and optionally forward to the relayer. + // Note that if these microblocks can be processed *now*, then they *will not* be + // buffered + let to_buffer = self.handle_unsolicited_MicroblocksData( + chainstate, + event_id, + new_mblocks, + buffer, + ); + + // only forward to the relayer if we don't need to buffer it. + (to_buffer, true) + } + StacksMessageType::NakamotoBlocks(ref new_blocks) => { + let to_buffer = if buffer { + self.handle_unsolicited_NakamotoBlocksData( + sortdb, chainstate, event_id, new_blocks, + ) + } else { + // nothing to do if we're not querying about whether we can buffer this.
+ false + }; + + (to_buffer, true) + } + StacksMessageType::StackerDBPushChunk(ref data) => { + match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { + Ok(x) => { + // don't buffer, but do reject if invalid + (false, x) + } + Err(e) => { + info!( + "{:?}: failed to handle unsolicited {:?}: {:?}", + &self.local_peer, payload, &e + ); + (false, false) + } + } + } + _ => (false, true), + } + } + + #[cfg_attr(test, mutants::skip)] + /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. + /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the + /// relayer can do something useful with them. + /// + /// Invalid messages are dropped, with a log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwarded to the relayer. + /// + /// Returns the messages to be forwarded to the relayer, keyed by sender. + pub fn handle_unsolicited_messages( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + unsolicited: HashMap<usize, Vec<StacksMessage>>, + ibd: bool, + buffer: bool, + ) -> HashMap<NeighborKey, Vec<StacksMessage>> { + let mut unhandled: HashMap<NeighborKey, Vec<StacksMessage>> = HashMap::new(); + for (event_id, messages) in unsolicited.into_iter() { + if messages.len() == 0 { + // no messages for this event + continue; + } + if buffer && self.check_peer_authenticated(event_id).is_none() { + if cfg!(test) + && self + .connection_opts + .test_disable_unsolicited_message_authentication + { + test_debug!( + "{:?}: skip unsolicited message authentication", + &self.local_peer + ); + } else { + // do not buffer messages from unknown peers + // (but it's fine to process messages that were previously buffered, since the peer + // may have since disconnected) + debug!("Will not handle unsolicited messages from unauthenticated or dead event {}", event_id); + continue; + } + }; + let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { + convo.to_neighbor_key() + } else { + debug!( + "{:?}: No such neighbor for event={}, dropping {} unsolicited messages", + &self.local_peer, + event_id, + messages.len() + ); + continue; + }; + + debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); + + for message in messages.into_iter() { + if buffer + && !self.can_buffer_data_message( + event_id, + self.pending_messages.get(&event_id).unwrap_or(&vec![]), + &message, + ) + { + // asked to buffer, but we don't have space + continue; + } + + if !buffer { + debug!( + "{:?}: Re-try handling buffered message {} from {:?}", + &self.local_peer, + &message.payload.get_message_description(), + &neighbor_key + ); + } + let (to_buffer, relay) = self.handle_unsolicited_message( + sortdb, + chainstate, + event_id, + &message.preamble, + &message.payload, + ibd, + buffer, + ); + if buffer && to_buffer { + self.buffer_data_message(event_id, message); + } else if relay { + // forward to relayer for processing + debug!( + "{:?}: Will forward message {} from {:?} to relayer", + &self.local_peer, + &message.payload.get_message_description(), + &neighbor_key + ); + if let Some(msgs) = unhandled.get_mut(&neighbor_key) { + msgs.push(message); + } else { + unhandled.insert(neighbor_key.clone(), vec![message]); + } + } + } + } + unhandled + } +} diff --git a/stackslib/src/util_lib/bloom.rs
b/stackslib/src/util_lib/bloom.rs index d34fca233ab..d1632f0b14f 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -22,9 +22,11 @@ use std::io::{Read, Seek, SeekFrom, Write}; use rand::prelude::*; use rand::thread_rng; use rusqlite::blob::Blob; -use rusqlite::{Error as sqlite_error, Row, ToSql, NO_PARAMS}; +use rusqlite::types::ToSql; +use rusqlite::{params, Error as sqlite_error, Row}; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{read_next, write_next, Error as codec_error, StacksMessageCodec}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use crate::util_lib::db::{query_expect_row, DBConn, DBTx, Error as db_error}; @@ -360,7 +362,7 @@ impl BloomCounter { "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", table_name ); - let args: &[&dyn ToSql] = &[&counts_vec, &num_bins, &num_hashes, &hasher_vec]; + let args = params![counts_vec, num_bins, num_hashes, hasher_vec]; tx.execute(&sql, args).map_err(db_error::SqliteError)?; @@ -381,7 +383,7 @@ impl BloomCounter { let sql = format!("SELECT rowid,* FROM {}", table_name); let result = conn.query_row_and_then(&sql, NO_PARAMS, |row| { let mut hasher_blob = row - .get_raw("hasher") + .get_ref("hasher")? .as_blob() .expect("Unable to read hasher as blob"); let hasher = diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 940d79bafee..f54a9c97ec7 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. +use std::backtrace::Backtrace; use std::io::Error as IOError; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; @@ -24,18 +25,21 @@ use clarity::vm::types::QualifiedContractIdentifier; use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ - Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, - TransactionBehavior, NO_PARAMS, + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Params, Row, + Transaction, TransactionBehavior, }; use serde_json::Error as serde_error; use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::Address; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::index::marf::{MarfConnection, MarfTransaction, MARF}; use crate::chainstate::stacks::index::{Error as MARFError, MARFValue, MarfTrieId}; +use crate::core::{StacksEpoch, StacksEpochId}; pub type DBConn = rusqlite::Connection; pub type DBTx<'a> = rusqlite::Transaction<'a>; @@ -394,8 +398,7 @@ fn log_sql_eqp(_conn: &Connection, _sql_query: &str) {} /// boilerplate code for querying rows pub fn query_rows<T, P>(conn: &Connection, sql_query: &str, sql_args: P) -> Result<Vec<T>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow<T>, { log_sql_eqp(conn, sql_query); @@ -409,8 +412,7 @@ where /// if more than 1 row is returned, excess rows are ignored.
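
The switch from the old `P: IntoIterator, P::Item: ToSql` bound to rusqlite's `Params` trait changes call sites from hand-built `&[&dyn ToSql]` slices to the `params![]` macro. A stand-alone illustration against plain rusqlite (not stacks-core code):

    use rusqlite::{params, Connection};

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute("CREATE TABLE t (id INTEGER, name TEXT)", params![])?;
        // old style: let args: &[&dyn ToSql] = &[&1i64, &"alice"];
        // new style: params![] handles the ToSql conversions
        conn.execute(
            "INSERT INTO t (id, name) VALUES (?1, ?2)",
            params![1i64, "alice"],
        )?;
        let n: i64 = conn.query_row("SELECT COUNT(*) FROM t", params![], |row| row.get(0))?;
        assert_eq!(n, 1);
        Ok(())
    }
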
pub fn query_row<T, P>(conn: &Connection, sql_query: &str, sql_args: P) -> Result<Option<T>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow<T>, { log_sql_eqp(conn, sql_query); @@ -430,8 +432,7 @@ pub fn query_expect_row<T, P>( sql_args: P, ) -> Result<Option<T>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow<T>, { log_sql_eqp(conn, sql_query); @@ -456,8 +457,7 @@ pub fn query_row_panic<T, P, F>( panic_message: F, ) -> Result<Option<T>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow<T>, F: FnOnce() -> String, { @@ -482,8 +482,7 @@ pub fn query_row_columns<T, P>( column_name: &str, ) -> Result<Vec<T>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromColumn<T>, { log_sql_eqp(conn, sql_query); @@ -503,8 +502,7 @@ where /// Boilerplate for querying a single integer (first and only item of the query must be an int) pub fn query_int
<P>
(conn: &Connection, sql_query: &str, sql_args: P) -> Result<i64, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, { log_sql_eqp(conn, sql_query); let mut stmt = conn.prepare(sql_query)?; @@ -527,8 +525,7 @@ where pub fn query_count
<P>
(conn: &Connection, sql_query: &str, sql_args: P) -> Result<i64, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, { query_int(conn, sql_query, sql_args) } @@ -672,7 +669,14 @@ pub fn tx_busy_handler(run_count: i32) -> bool { debug!( "Database is locked; sleeping {}ms and trying again", - &sleep_count + &sleep_count; + "backtrace" => ?{ + if run_count > 10 && run_count % 10 == 0 { + Some(Backtrace::capture()) + } else { + None + } + }, ); sleep_ms(sleep_count); @@ -770,7 +774,7 @@ fn load_indexed(conn: &DBConn, marf_value: &MARFValue) -> Result<Option<String>, .prepare("SELECT value FROM __fork_storage WHERE value_hash = ?1 LIMIT 2") .map_err(Error::SqliteError)?; let mut rows = stmt - .query(&[&marf_value.to_hex() as &dyn ToSql]) + .query(params![marf_value.to_hex()]) .map_err(Error::SqliteError)?; let mut value = None; @@ -906,6 +910,12 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { get_indexed(self.index_mut(), header_hash, key) } + /// Get a value from the fork index, but with a read-only reference + pub fn get_indexed_ref(&self, header_hash: &T, key: &str) -> Result<Option<String>, Error> { + let mut ro_index = self.index().reopen_readonly()?; + get_indexed(&mut ro_index, header_hash, key) + } + /// Put all keys and values in a single MARF transaction, and seal it. /// This is a one-time operation; subsequent calls will panic. You should follow this up with /// a commit if you want to save the MARF state. diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 72cc8d2491e..aa72f814db8 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -31,6 +31,7 @@ wsts = { workspace = true } rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -49,10 +50,6 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" -[dependencies.rusqlite] -version = "=0.24.2" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [[bin]] name = "stacks-node" path = "src/main.rs" @@ -62,7 +59,8 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] +testing = [] diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index f96c0c198b7..39ef40490b4 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -57,7 +57,11 @@ use stacks_common::util::sleep_ms; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; +use crate::config::{ + BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE, +}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -125,6 +129,8 @@ pub fn addr2str(btc_addr:
&BitcoinAddress) -> String { format!("{}", &btc_addr) } +// TODO: add tests from mutation testing results #4862 +#[cfg_attr(test, mutants::skip)] pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParameters { let (network, _) = config.get_bitcoin_network(); let mut params = BurnchainParameters::from_params(&config.chain, &network) @@ -135,6 +141,8 @@ pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParame params } +// TODO: add tests from mutation testing results #4863 +#[cfg_attr(test, mutants::skip)] /// Helper method to create a BitcoinIndexer pub fn make_bitcoin_indexer( config: &Config, @@ -272,6 +280,8 @@ impl BitcoinRegtestController { BitcoinRegtestController::with_burnchain(config, coordinator_channel, None, None) } + // TODO: add tests from mutation testing results #4864 + #[cfg_attr(test, mutants::skip)] pub fn with_burnchain( config: Config, coordinator_channel: Option, @@ -341,6 +351,8 @@ impl BitcoinRegtestController { } } + // TODO: add tests from mutation testing results #4864 + #[cfg_attr(test, mutants::skip)] /// create a dummy bitcoin regtest controller. /// used just for submitting bitcoin ops. pub fn new_dummy(config: Config) -> Self { @@ -703,8 +715,8 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, block_height: u64, ) -> Option { - // if mock mining, do not even both requesting UTXOs - if self.config.node.mock_mining { + // if mock mining, do not even bother requesting UTXOs + if self.config.get_node_config(false).mock_mining { return None; } @@ -860,6 +872,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -942,7 +955,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_TRANSFER_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( Transaction { @@ -997,6 +1010,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1024,7 +1038,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_DELEGATE_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1080,6 +1094,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1102,7 +1117,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_VOTE_AGG_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1154,6 +1169,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1196,9 +1212,11 @@ impl BitcoinRegtestController { signer: &mut BurnchainOpSigner, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 280; + let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; + + let max_tx_size_any_op = OP_TX_ANY_ESTIM_SIZE; + let output_amt = DUST_UTXO_LIMIT + max_tx_size_any_op * get_satoshis_per_byte(&self.config); - let output_amt = DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = self.prepare_tx(epoch_id, &public_key, output_amt, None, None, 0)?; @@ -1230,6 +1248,7 @@ impl 
BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1242,6 +1261,7 @@ impl BitcoinRegtestController { Some(tx) } + #[cfg_attr(test, mutants::skip)] #[cfg(not(test))] fn build_stack_stx_tx( &mut self, @@ -1262,7 +1282,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 250; + let max_tx_size = OP_TX_STACK_STX_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1316,6 +1336,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1406,6 +1427,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + true, // only block commit op requires change output to exist )?; let serialized_tx = SerializedTx::new(tx.clone()); @@ -1617,6 +1639,8 @@ impl BitcoinRegtestController { } } + // TODO: add tests from mutation testing results #4865 + #[cfg_attr(test, mutants::skip)] fn prepare_tx( &mut self, epoch_id: StacksEpochId, @@ -1674,6 +1698,7 @@ impl BitcoinRegtestController { fee_rate: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> Option<()> { // spend UTXOs in order by confirmations. Spend the least-confirmed UTXO first, and in the // event of a tie, spend the smallest-value UTXO first. @@ -1704,6 +1729,7 @@ impl BitcoinRegtestController { spent_in_outputs + min_tx_size * fee_rate + estimated_rbf, &mut utxos_cloned, signer, + force_change_output, ); let serialized_tx = SerializedTx::new(tx_cloned); cmp::max(min_tx_size, serialized_tx.bytes.len() as u64) @@ -1720,6 +1746,7 @@ impl BitcoinRegtestController { spent_in_outputs + tx_size * fee_rate + rbf_fee, utxos_set, signer, + force_change_output, ); signer.dispose(); Some(()) @@ -1733,38 +1760,45 @@ impl BitcoinRegtestController { &mut self, epoch_id: StacksEpochId, tx: &mut Transaction, - total_to_spend: u64, + tx_cost: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> bool { let mut public_key = signer.get_public_key(); - let mut total_consumed = 0; + + let total_target = if force_change_output { + tx_cost + DUST_UTXO_LIMIT + } else { + tx_cost + }; // select UTXOs until we have enough to cover the cost + let mut total_consumed = 0; let mut available_utxos = vec![]; available_utxos.append(&mut utxos_set.utxos); for utxo in available_utxos.into_iter() { total_consumed += utxo.amount; utxos_set.utxos.push(utxo); - if total_consumed >= total_to_spend { + if total_consumed >= total_target { break; } } - if total_consumed < total_to_spend { + if total_consumed < total_target { warn!( "Consumed total {} is less than intended spend: {}", - total_consumed, total_to_spend + total_consumed, total_target ); return false; } // Append the change output - let value = total_consumed - total_to_spend; + let value = total_consumed - tx_cost; debug!( "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_to_spend + value, total_consumed, total_target ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { @@ -1984,6 +2018,8 @@ impl BitcoinRegtestController { self.config.miner.segwit = segwit; } + // TODO: add tests from mutation testing results #4866 + #[cfg_attr(test, mutants::skip)] pub fn make_operation_tx( &mut self, epoch_id: StacksEpochId, @@ -2024,6 +2060,61 @@ impl 
BitcoinRegtestController { let tx: Transaction = btc_deserialize(&hex_bytes(&txstr).unwrap()).unwrap(); tx } + + /// Produce `num_blocks` regtest bitcoin blocks, sending the bitcoin coinbase rewards + /// to the bitcoin single sig addresses corresponding to `pks` in a round robin fashion. + #[cfg(test)] + pub fn bootstrap_chain_to_pks(&mut self, num_blocks: usize, pks: &[Secp256k1PublicKey]) { + info!("Creating wallet if it does not exist"); + if let Err(e) = self.create_wallet_if_dne() { + error!("Error when creating wallet: {e:?}"); + } + + for pk in pks { + debug!("Import public key '{}'", &pk.to_hex()); + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + warn!("Error when importing pubkey: {e:?}"); + } + } + + if pks.len() == 1 { + // if we only have one pubkey, just generate all the blocks at once + let address = self.get_miner_address(StacksEpochId::Epoch21, &pks[0]); + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pks[0].to_hex() + ); + if let Err(e) = BitcoinRPCRequest::generate_to_address( + &self.config, + num_blocks.try_into().unwrap(), + addr2str(&address), + ) { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + return; + } + + // otherwise, round robin generate blocks + for i in 0..num_blocks { + let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let address = self.get_miner_address(StacksEpochId::Epoch21, pk); + if i < pks.len() { + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pk.to_hex(), + ); + } + if let Err(e) = + BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) + { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + } + } } impl BurnchainController for BitcoinRegtestController { @@ -2139,45 +2230,19 @@ impl BurnchainController for BitcoinRegtestController { #[cfg(test)] fn bootstrap_chain(&mut self, num_blocks: u64) { - if let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key { - // NOTE: miner address is whatever the miner's segwit setting says it is here - let mut local_mining_pubkey = - Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - let address = self.get_miner_address(StacksEpochId::Epoch21, &local_mining_pubkey); - - if self.config.miner.segwit { - local_mining_pubkey.set_compressed(true); - } - - info!("Creating wallet if it does not exist"); - match self.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} - } - - test_debug!("Import public key '{}'", &local_mining_pubkey.to_hex()); - - let _result = BitcoinRPCRequest::import_public_key(&self.config, &local_mining_pubkey); + let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key else { + warn!("No local mining pubkey while bootstrapping bitcoin regtest, will not generate bitcoin blocks"); + return; + }; - test_debug!( - "Generate to address '{}' for public key '{}'", - &addr2str(&address), - &local_mining_pubkey.to_hex() - ); - let result = BitcoinRPCRequest::generate_to_address( - &self.config, - num_blocks, - addr2str(&address), - ); + // NOTE: miner address is whatever the miner's segwit setting says it is here + let mut local_mining_pubkey = Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - match result { - Ok(_) => {} - Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); - panic!(); - } - } + if self.config.miner.segwit { + local_mining_pubkey.set_compressed(true); 
} + + self.bootstrap_chain_to_pks(num_blocks.try_into().unwrap(), &[local_mining_pubkey]) } } diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 3db583aa480..6bb958e0708 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -199,6 +199,7 @@ impl BurnchainController for MocknetController { } BlockstackOperationType::LeaderBlockCommit(payload) => { BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: payload.block_header_hash, new_seed: payload.new_seed, diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index 4170cf6f6d9..b1e32c15ea2 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -163,6 +163,11 @@ impl MinerStats { // calculate the burn distribution from these operations. // The resulting distribution will contain the user burns that match block commits let burn_dist = BurnSamplePoint::make_min_median_distribution( + if burnchain.is_in_prepare_phase(burn_block_height) { + 1 + } else { + MINING_COMMITMENT_WINDOW + }, windowed_block_commits, windowed_missed_commits, burn_blocks, @@ -272,6 +277,7 @@ impl MinerStats { // mocked commit let mocked_commit = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -436,6 +442,7 @@ impl MinerStats { for (miner, last_commit) in active_miners_and_commits.iter() { if !commit_table.contains_key(miner) { let mocked_commit = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -545,6 +552,7 @@ pub mod tests { #[test] fn test_burn_dist_to_prob_dist() { let block_commit_1 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -577,6 +585,7 @@ pub mod tests { }; let block_commit_2 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -612,6 +621,7 @@ pub mod tests { }; let block_commit_3 = LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -647,6 +657,7 @@ pub mod tests { }; let burn_dist = vec![ BurnSamplePoint { + frequency: 10, burns: block_commit_1.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), range_start: Uint256::zero(), @@ -659,6 +670,7 @@ pub mod tests { candidate: block_commit_1.clone(), }, BurnSamplePoint { + frequency: 10, burns: block_commit_2.burn_fee.into(), median_burn: block_commit_2.burn_fee.into(), range_start: Uint256([ @@ -676,6 +688,7 @@ pub mod tests { candidate: block_commit_2.clone(), }, BurnSamplePoint { + frequency: 10, burns: (block_commit_3.burn_fee).into(), median_burn: block_commit_3.burn_fee.into(), range_start: Uint256([ @@ -809,6 +822,7 @@ EOF ( "miner-1".to_string(), LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -840,6 +854,7 @@ EOF ( "miner-2".to_string(), LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -874,6 +889,7 @@ EOF ( "miner-3".to_string(), 
LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -908,6 +924,7 @@ EOF let unconfirmed_block_commits = vec![ LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -934,6 +951,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -960,6 +978,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -986,6 +1005,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d738745ca57..4eef0bbdd07 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -4,7 +4,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; -use std::{fs, thread}; +use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; @@ -49,10 +49,26 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use crate::chain_data::MinerStats; pub const DEFAULT_SATS_PER_VB: u64 = 50; +pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; +pub const OP_TX_DELEGATE_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; +pub const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280; +pub const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250; +pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; + +pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( + OP_TX_BLOCK_COMMIT_ESTIM_SIZE, + OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_LEADER_KEY_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, + OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE +); + const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; -const LEADER_KEY_TX_ESTIM_SIZE: u64 = 290; -const BLOCK_COMMIT_TX_ESTIM_SIZE: u64 = 350; const INV_REWARD_CYCLES_TESTNET: u64 = 6; #[derive(Clone, Deserialize, Default, Debug)] @@ -101,7 +117,8 @@ mod tests { seed = "invalid-hex-value" "#, ) - .unwrap() + .unwrap(), + false ) .unwrap_err() ); @@ -115,7 +132,8 @@ mod tests { local_peer_seed = "invalid-hex-value" "#, ) - .unwrap() + .unwrap(), + false ) .unwrap_err() ); @@ -130,6 +148,7 @@ mod tests { "#, ) .unwrap(), + false, ) .unwrap_err(); assert_eq!( @@ -137,7 +156,7 @@ mod tests { &actual_err_msg[..expected_err_prefix.len()] ); - assert!(Config::from_config_file(ConfigFile::from_str("").unwrap()).is_ok()); + assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); } #[test] @@ -195,6 +214,7 @@ mod tests { "#, ) .unwrap(), + false, ) .expect("Expected to be able to parse block proposal token from file"); @@ -218,6 +238,7 @@ mod tests { "# )) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); @@ -241,7 +262,7 @@ mod tests { )) .expect("Expected to be able to parse 
config file from string"); - assert!(Config::from_config_file(file).is_err()); + assert!(Config::from_config_file(file, false).is_err()); } #[test] @@ -249,6 +270,7 @@ mod tests { let config = Config::from_config_file( ConfigFile::from_str(r#""#) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); @@ -266,6 +288,7 @@ mod tests { "#, ) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); // Should default add xenon affirmation overrides @@ -291,6 +314,7 @@ mod tests { "#, )) .expect("Expected to be able to parse config file from string"), + false, ) .expect("Expected to be able to parse affirmation map from file"); // Should default add xenon affirmation overrides, but overwrite with the configured one above @@ -505,7 +529,7 @@ lazy_static! { heartbeat: 3600, // can't use u64::max, because sqlite stores as i64. private_key_lifetime: 9223372036854775807, - num_neighbors: 16, // number of neighbors whose inventories we track + num_neighbors: 32, // number of neighbors whose inventories we track num_clients: 750, // number of inbound p2p connections soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track soft_num_clients: 750, // soft limit on the number of inbound p2p connections @@ -537,7 +561,7 @@ impl Config { let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { return self.burnchain.clone(); }; - let Ok(config) = Config::from_config_file(config_file) else { + let Ok(config) = Config::from_config_file(config_file, false) else { return self.burnchain.clone(); }; config.burnchain @@ -552,12 +576,25 @@ impl Config { let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { return self.miner.clone(); }; - let Ok(config) = Config::from_config_file(config_file) else { + let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; return config.miner; } + pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { + let Some(path) = &self.config_path else { + return self.node.clone(); + }; + let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { + return self.node.clone(); + }; + let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { + return self.node.clone(); + }; + return config.node; + } + /// Apply any test settings to this burnchain config struct #[cfg_attr(test, mutants::skip)] fn apply_test_settings(&self, burnchain: &mut Burnchain) { @@ -806,6 +843,8 @@ impl Config { } } + // TODO: add tests from mutation testing results #4866 + #[cfg_attr(test, mutants::skip)] fn make_epochs( conf_epochs: &[StacksEpochConfigFile], burn_mode: &str, @@ -928,11 +967,18 @@ impl Config { Ok(out_epochs) } - pub fn from_config_file(config_file: ConfigFile) -> Result { - Self::from_config_default(config_file, Config::default()) + pub fn from_config_file( + config_file: ConfigFile, + resolve_bootstrap_nodes: bool, + ) -> Result { + Self::from_config_default(config_file, Config::default(), resolve_bootstrap_nodes) } - fn from_config_default(config_file: ConfigFile, default: Config) -> Result { + fn from_config_default( + config_file: ConfigFile, + default: Config, + resolve_bootstrap_nodes: bool, + ) -> Result { let Config { node: default_node_config, burnchain: default_burnchain_config, @@ -983,9 +1029,15 @@ impl Config { }; if let Some(bootstrap_node) = bootstrap_node { - 
node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); + if resolve_bootstrap_nodes { + node.set_bootstrap_nodes( + bootstrap_node, + burnchain.chain_id, + burnchain.peer_version, + ); + } } else { - if is_mainnet { + if is_mainnet && resolve_bootstrap_nodes { let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); node.set_bootstrap_nodes( bootstrap_node, @@ -1220,6 +1272,28 @@ impl Config { self.events_observers.len() > 0 } + pub fn make_nakamoto_block_builder_settings( + &self, + miner_status: Arc>, + ) -> BlockBuilderSettings { + let miner_config = self.get_miner_config(); + BlockBuilderSettings { + max_miner_time_ms: miner_config.nakamoto_attempt_time_ms, + mempool_settings: MemPoolWalkSettings { + max_walk_time_ms: miner_config.nakamoto_attempt_time_ms, + consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, + nonce_cache_size: miner_config.nonce_cache_size, + candidate_retry_cache_size: miner_config.candidate_retry_cache_size, + txs_to_consider: miner_config.txs_to_consider, + filter_origins: miner_config.filter_origins, + }, + miner_status, + confirm_microblocks: false, + } + } + + // TODO: add tests from mutation testing results #4867 + #[cfg_attr(test, mutants::skip)] pub fn make_block_builder_settings( &self, attempt: u64, @@ -1268,6 +1342,20 @@ impl Config { } None } + + /// Determine how long the p2p state machine should poll for. + /// If the node is not mining, then use a default value. + /// If the node is mining, however, then at the time of this writing, the miner's latency is in + /// part dependent on the state machine getting block data back to the miner quickly, and thus + /// the poll time is dependent on the first attempt time. + pub fn get_poll_time(&self) -> u64 { + let poll_timeout = if self.node.miner { + cmp::min(5000, self.miner.first_attempt_time_ms / 2) + } else { + 5000 + }; + poll_timeout + } } impl std::default::Default for Config { @@ -1355,8 +1443,8 @@ impl BurnchainConfig { poll_time_secs: 10, // TODO: this is a testnet specific value. satoshis_per_byte: DEFAULT_SATS_PER_VB, max_rbf: DEFAULT_MAX_RBF_RATE, - leader_key_tx_estimated_size: LEADER_KEY_TX_ESTIM_SIZE, - block_commit_tx_estimated_size: BLOCK_COMMIT_TX_ESTIM_SIZE, + leader_key_tx_estimated_size: OP_TX_LEADER_KEY_ESTIM_SIZE, + block_commit_tx_estimated_size: OP_TX_BLOCK_COMMIT_ESTIM_SIZE, rbf_fee_increment: DEFAULT_RBF_FEE_RATE_INCREMENT, first_burn_block_height: None, first_burn_block_timestamp: None, @@ -1727,6 +1815,13 @@ pub struct NodeConfig { pub max_microblocks: u64, pub wait_time_for_microblocks: u64, pub wait_time_for_blocks: u64, + /// Controls how frequently, in milliseconds, the nakamoto miner's relay thread acts on its own initiative + /// (as opposed to responding to an event from the networking thread, etc.). This is roughly + /// how frequently the miner checks if a new burnchain block has been processed. + /// + /// Default value of 10 seconds is reasonable in mainnet (where bitcoin blocks are ~10 minutes), + /// but environments where burn blocks are more frequent may want to decrease this value. 
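
The poll-time rule in `get_poll_time` above reduces to a single `min`; a sketch extracted from this hunk (a standalone function rather than a `Config` method):

    use std::cmp;

    // Miners poll the p2p state machine more aggressively, bounded by the
    // default 5s, so newly arrived block data reaches the miner quickly.
    fn get_poll_time(is_miner: bool, first_attempt_time_ms: u64) -> u64 {
        if is_miner {
            cmp::min(5000, first_attempt_time_ms / 2)
        } else {
            5000
        }
    }

    fn main() {
        assert_eq!(get_poll_time(false, 120_000), 5000);
        assert_eq!(get_poll_time(true, 10), 5); // default first_attempt_time_ms
        assert_eq!(get_poll_time(true, 120_000), 5000);
    }
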
+ pub next_initiative_delay: u64, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: bool, @@ -2012,6 +2107,7 @@ impl Default for NodeConfig { max_microblocks: u16::MAX as u64, wait_time_for_microblocks: 30_000, wait_time_for_blocks: 30_000, + next_initiative_delay: 10_000, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: true, @@ -2192,6 +2288,8 @@ pub struct MinerConfig { pub first_attempt_time_ms: u64, pub subsequent_attempt_time_ms: u64, pub microblock_attempt_time_ms: u64, + /// Max time to assemble Nakamoto block + pub nakamoto_attempt_time_ms: u64, pub probability_pick_no_estimate_tx: u8, pub block_reward_recipient: Option, /// If possible, mine with a p2wpkh address @@ -2242,6 +2340,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, + nakamoto_attempt_time_ms: 20_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, @@ -2310,6 +2409,7 @@ pub struct ConnectionOptionsFile { pub antientropy_public: Option, pub private_neighbors: Option, pub block_proposal_token: Option, + pub antientropy_retry: Option, } impl ConnectionOptionsFile { @@ -2341,6 +2441,7 @@ impl ConnectionOptionsFile { self.read_only_call_limit_runtime.map(|x| { read_only_call_limit.runtime = x; }); + let default = ConnectionOptions::default(); Ok(ConnectionOptions { read_only_call_limit, inbox_maxlen: self @@ -2434,7 +2535,8 @@ impl ConnectionOptionsFile { antientropy_public: self.antientropy_public.unwrap_or(true), private_neighbors: self.private_neighbors.unwrap_or(true), block_proposal_token: self.block_proposal_token, - ..ConnectionOptions::default() + antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), + ..default }) } } @@ -2459,6 +2561,7 @@ pub struct NodeConfigFile { pub max_microblocks: Option, pub wait_time_for_microblocks: Option, pub wait_time_for_blocks: Option, + pub next_initiative_delay: Option, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: Option, @@ -2519,6 +2622,9 @@ impl NodeConfigFile { wait_time_for_blocks: self .wait_time_for_blocks .unwrap_or(default_node_config.wait_time_for_blocks), + next_initiative_delay: self + .next_initiative_delay + .unwrap_or(default_node_config.next_initiative_delay), prometheus_bind: self.prometheus_bind, marf_cache_strategy: self.marf_cache_strategy, marf_defer_hashing: self @@ -2567,6 +2673,7 @@ pub struct MinerConfigFile { pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, pub microblock_attempt_time_ms: Option, + pub nakamoto_attempt_time_ms: Option, pub probability_pick_no_estimate_tx: Option, pub block_reward_recipient: Option, pub segwit: Option, @@ -2600,6 +2707,9 @@ impl MinerConfigFile { microblock_attempt_time_ms: self .microblock_attempt_time_ms .unwrap_or(miner_default_config.microblock_attempt_time_ms), + nakamoto_attempt_time_ms: self + .nakamoto_attempt_time_ms + .unwrap_or(miner_default_config.nakamoto_attempt_time_ms), probability_pick_no_estimate_tx: self .probability_pick_no_estimate_tx .unwrap_or(miner_default_config.probability_pick_no_estimate_tx), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 665334e924c..8e53fd47eb9 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -6,7 +6,9 @@ use std::thread::sleep; use std::time::Duration; use async_h1::client; +use 
async_std::future::timeout; use async_std::net::TcpStream; +use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; @@ -128,6 +130,7 @@ pub struct MinedMicroblockEvent { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MinedNakamotoBlockEvent { pub target_burn_height: u64, + pub parent_block_id: String, pub block_hash: String, pub block_id: String, pub stacks_height: u64, @@ -137,6 +140,7 @@ pub struct MinedNakamotoBlockEvent { pub signer_signature_hash: Sha512Trunc256Sum, pub tx_events: Vec, pub signer_bitvec: String, + pub signer_signature: Vec, } impl InnerStackerDBChannel { @@ -295,6 +299,9 @@ impl RewardSetEventPayload { impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + debug!( + "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload + ); let body = match serde_json::to_vec(&payload) { Ok(body) => body, Err(err) => { @@ -314,6 +321,7 @@ impl EventObserver { }; let backoff = Duration::from_millis((1.0 * 1_000.0) as u64); + let connection_timeout = Duration::from_secs(5); loop { let body = body.clone(); @@ -321,20 +329,25 @@ impl EventObserver { req.append_header("Content-Type", "application/json"); req.set_body(body); - let response = async_std::task::block_on(async { - let stream = match TcpStream::connect(self.endpoint.clone()).await { - Ok(stream) => stream, - Err(err) => { - warn!("Event dispatcher: connection failed - {:?}", err); - return None; - } - }; + let response = task::block_on(async { + let stream = + match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { + Ok(Ok(stream)) => stream, + Ok(Err(err)) => { + warn!("Event dispatcher: connection failed - {:?}", err); + return None; + } + Err(_) => { + error!("Event dispatcher: connection attempt timed out"); + return None; + } + }; match client::connect(stream, req).await { Ok(response) => Some(response), Err(err) => { warn!("Event dispatcher: rpc invocation failed - {:?}", err); - return None; + None } } }); @@ -647,17 +660,13 @@ impl EventObserver { "signer_signature_hash".into(), format!("0x{}", header.signer_signature_hash()).into(), ); - as_object_mut.insert( - "signer_signature".into(), - format!("0x{}", header.signer_signature_hash()).into(), - ); as_object_mut.insert( "miner_signature".into(), format!("0x{}", &header.miner_signature).into(), ); as_object_mut.insert( "signer_signature".into(), - format!("0x{}", &header.signer_signature).into(), + serde_json::to_value(&header.signer_signature).unwrap_or_default(), ); } @@ -1244,7 +1253,7 @@ impl EventDispatcher { return; } - let signer_bitvec = serde_json::to_value(block.header.signer_bitvec.clone()) + let signer_bitvec = serde_json::to_value(block.header.pox_treatment.clone()) .unwrap_or_default() .as_str() .unwrap_or_default() @@ -1252,6 +1261,7 @@ impl EventDispatcher { let payload = serde_json::to_value(MinedNakamotoBlockEvent { target_burn_height, + parent_block_id: block.header.parent_block_id.to_string(), block_hash: block.header.block_hash().to_string(), block_id: block.header.block_id().to_string(), stacks_height: block.header.chain_length, @@ -1260,6 +1270,7 @@ impl EventDispatcher { tx_events, miner_signature: block.header.miner_signature.clone(), signer_signature_hash: block.header.signer_signature_hash(), + signer_signature: block.header.signer_signature.clone(), signer_bitvec, }) .unwrap(); @@ -1433,8 +1444,12 @@ 
impl EventDispatcher { mod test { use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; - use stacks::chainstate::stacks::db::StacksHeaderInfo; + use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; + use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; + use stacks::types::chainstate::BlockHeaderHash; + use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; @@ -1497,4 +1512,66 @@ mod test { expected_bitvec_str ); } + + #[test] + fn test_block_processed_event_nakamoto() { + let observer = EventObserver { + endpoint: "nowhere".to_string(), + }; + + let filtered_events = vec![]; + let mut block_header = NakamotoBlockHeader::empty(); + let signer_signature = vec![ + MessageSignature::from_bytes(&[0; 65]).unwrap(), + MessageSignature::from_bytes(&[1; 65]).unwrap(), + ]; + block_header.signer_signature = signer_signature.clone(); + let block = NakamotoBlock { + header: block_header.clone(), + txs: vec![], + }; + let mut metadata = StacksHeaderInfo::regtest_genesis(); + metadata.anchored_header = StacksBlockHeaderTypes::Nakamoto(block_header.clone()); + let receipts = vec![]; + let parent_index_hash = StacksBlockId([0; 32]); + let winner_txid = Txid([0; 32]); + let mature_rewards = serde_json::Value::Array(vec![]); + let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); + let parent_burn_block_height = 0; + let parent_burn_block_timestamp = 0; + let anchored_consumed = ExecutionCost::zero(); + let mblock_confirmed_consumed = ExecutionCost::zero(); + let pox_constants = PoxConstants::testnet_default(); + let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + + let payload = observer.make_new_block_processed_payload( + filtered_events, + &StacksBlockEventData::from((block, BlockHeaderHash([0; 32]))), + &metadata, + &receipts, + &parent_index_hash, + &winner_txid, + &mature_rewards, + parent_burn_block_hash, + parent_burn_block_height, + parent_burn_block_timestamp, + &anchored_consumed, + &mblock_confirmed_consumed, + &pox_constants, + &None, + &Some(signer_bitvec.clone()), + ); + + let event_signer_signature = payload + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to deserialize array of MessageSignature"); + assert_eq!(event_signer_signature, signer_signature); + } } diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index a6a2fdad3c0..b0f338032a5 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -59,7 +59,7 @@ pub struct Globals { /// Global flag to see if we should keep running pub should_keep_running: Arc, /// Status of our VRF key registration state (shared between the main thread and the relayer) - leader_key_registration_state: Arc>, + pub leader_key_registration_state: Arc>, /// Last miner config loaded last_miner_config: Arc>>, /// burnchain height at which we start mining @@ -103,6 +103,7 @@ impl Globals { sync_comms: PoxSyncWatchdogComms, should_keep_running: Arc, start_mining_height: u64, + leader_key_registration_state: LeaderKeyRegistrationState, ) -> Globals { Globals { last_sortition: Arc::new(Mutex::new(None)), @@ -113,9 +114,7 @@ 
impl Globals { counters, sync_comms, should_keep_running, - leader_key_registration_state: Arc::new(Mutex::new( - LeaderKeyRegistrationState::Inactive, - )), + leader_key_registration_state: Arc::new(Mutex::new(leader_key_registration_state)), last_miner_config: Arc::new(Mutex::new(None)), start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), @@ -287,6 +286,7 @@ impl Globals { vrf_public_key: op.public_key, block_height: op.block_height as u64, op_vtxindex: op.vtxindex as u32, + memo: op.memo, }; **leader_key_registration_state = diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index cb512969c05..41b74262787 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -65,7 +65,7 @@ static GLOBAL: Jemalloc = Jemalloc; fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCandidate { info!("Loading config at path {}", config_path); let config = match ConfigFile::from_path(config_path) { - Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); process::exit(1); @@ -105,7 +105,7 @@ fn cli_get_miner_spend( ) -> u64 { info!("Loading config at path {}", config_path); let config = match ConfigFile::from_path(&config_path) { - Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); process::exit(1); @@ -334,7 +334,7 @@ fn main() { process::exit(1); } }; - match Config::from_config_file(config_file) { + match Config::from_config_file(config_file, true) { Ok(_) => { info!("Loaded config!"); process::exit(0); @@ -365,9 +365,11 @@ fn main() { let seed = { let config_path: Option = args.opt_value_from_str("--config").unwrap(); if let Some(config_path) = config_path { - let conf = - Config::from_config_file(ConfigFile::from_path(&config_path).unwrap()) - .unwrap(); + let conf = Config::from_config_file( + ConfigFile::from_path(&config_path).unwrap(), + true, + ) + .unwrap(); args.finish(); conf.node.seed } else { @@ -416,7 +418,7 @@ fn main() { } }; - let conf = match Config::from_config_file(config_file) { + let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { warn!("Invalid config: {}", e); diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7b7fb32a64a..c57d630a583 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -14,9 +14,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::collections::HashSet; +use std::io::Write; use std::sync::mpsc::Receiver; -use std::thread; use std::thread::JoinHandle; +use std::{fs, thread}; use stacks::burnchains::{BurnchainSigner, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -33,6 +34,7 @@ use stacks_common::types::StacksEpochId; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; @@ -94,6 +96,8 @@ pub enum Error { BadVrfConstruction, CannotSelfSign, MiningFailure(ChainstateError), + /// The miner didn't accept their own block + AcceptFailure(ChainstateError), MinerSignatureError(&'static str), SignerSignatureError(String), /// A failure occurred while configuring the miner thread @@ -132,6 +136,7 @@ impl StacksNode { globals: Globals, // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push relay_recv: Receiver<RelayerDirective>, + data_from_neon: Option<Neon2NakaData>, ) -> StacksNode { let config = runloop.config().clone(); let is_miner = runloop.is_miner(); @@ -157,7 +162,11 @@ impl StacksNode { .connect_mempool_db() .expect("FATAL: database failure opening mempool"); - let mut p2p_net = NeonNode::setup_peer_network(&config, &atlas_config, burnchain); + let data_from_neon = data_from_neon.unwrap_or_default(); + + let mut p2p_net = data_from_neon + .peer_network + .unwrap_or_else(|| NeonNode::setup_peer_network(&config, &atlas_config, burnchain)); let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); @@ -167,7 +176,7 @@ impl StacksNode { let local_peer = p2p_net.local_peer.clone(); // setup initial key registration - let leader_key_registration_state = if config.node.mock_mining { + let leader_key_registration_state = if config.get_node_config(false).mock_mining { // mock mining, pretend to have a registered key let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); LeaderKeyRegistrationState::Active(RegisteredKey { @@ -175,10 +184,22 @@ block_height: 1, op_vtxindex: 1, vrf_public_key, + memo: keychain.get_nakamoto_pkh().as_bytes().to_vec(), }) } else { - LeaderKeyRegistrationState::Inactive + match &data_from_neon.leader_key_registration_state { + LeaderKeyRegistrationState::Active(registered_key) => { + let pubkey_hash = keychain.get_nakamoto_pkh(); + if pubkey_hash.as_ref() == &registered_key.memo { + data_from_neon.leader_key_registration_state + } else { + LeaderKeyRegistrationState::Inactive + } + } + _ => LeaderKeyRegistrationState::Inactive, + } }; + globals.set_initial_leader_key_registration_state(leader_key_registration_state); let relayer_thread = @@ -257,6 +278,7 @@ impl StacksNode { /// Called from the main thread.
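
`save_activated_vrf_key` (added later in this file) simply serializes the registered key to JSON and writes it out, so a restarted miner can reload it instead of re-registering. A minimal sketch of that round trip, using a stand-in struct rather than the real `RegisteredKey`:

    use std::fs;
    use serde::{Deserialize, Serialize};

    // Stand-in for RegisteredKey; the real type also carries the VRF public key.
    #[derive(Serialize, Deserialize, PartialEq, Debug)]
    struct SavedKey {
        block_height: u64,
        op_vtxindex: u32,
        memo: Vec<u8>,
    }

    fn main() {
        let key = SavedKey { block_height: 1, op_vtxindex: 1, memo: vec![0xaa] };
        let path = "/tmp/activated_vrf_key.json"; // hypothetical path
        // save: serialize to JSON, then write the whole string to `path`
        fs::write(path, serde_json::to_string(&key).unwrap()).unwrap();
        // reload on startup and verify it survived intact
        let reloaded: SavedKey =
            serde_json::from_str(&fs::read_to_string(path).unwrap()).unwrap();
        assert_eq!(reloaded, key);
    }
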
pub fn process_burnchain_state( &mut self, + config: &Config, sortdb: &SortitionDB, sort_id: &SortitionId, ibd: bool, @@ -296,9 +318,18 @@ impl StacksNode { let num_key_registers = key_registers.len(); - self.globals + let activated_key_opt = self + .globals .try_activate_leader_key_registration(block_height, key_registers); + // save the registered VRF key + if let (Some(activated_key), Some(path)) = ( + activated_key_opt, + config.miner.activated_vrf_key_path.as_ref(), + ) { + save_activated_vrf_key(path, &activated_key); + } + debug!( "Processed burnchain state"; "burn_height" => block_height, @@ -319,3 +350,27 @@ impl StacksNode { self.p2p_thread_handle.join().unwrap(); } } + +pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { + info!("Activated VRF key; saving to {}", path); + + let Ok(key_json) = serde_json::to_string(&activated_key) else { + warn!("Failed to serialize VRF key"); + return; + }; + + let mut f = match fs::File::create(&path) { + Ok(f) => f, + Err(e) => { + warn!("Failed to create {}: {:?}", &path, &e); + return; + } + }; + + if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + return; + } + + info!("Saved activated VRF key to {}", &path); +} diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3a976aecca2..a277d2d8645 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -22,39 +22,47 @@ use clarity::boot_util::boot_code_id; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use hashbrown::HashSet; -use libsigner::{ - BlockProposalSigners, MessageSlotID, SignerMessage, SignerSession, StackerDBSession, -}; +use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use libsigner::StackerDBSession; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; +use stacks::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use stacks::chainstate::stacks::boot::MINERS_NAME; +use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, - TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, - TransactionPayload, TransactionVersion, + TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionPayload, + TransactionVersion, }; +use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; +use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; -use 
stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; -use crate::burnchains::bitcoin_regtest_controller::burnchain_params_from_config; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::neon_node; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; -use crate::{neon_node, ChainTip}; + +#[cfg(test)] +pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(test)] +pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? @@ -72,8 +80,12 @@ pub enum MinerDirective { StopTenure, } +#[derive(PartialEq, Debug, Clone)] +/// Tenure info needed to construct a tenure change or tenure extend transaction struct ParentTenureInfo { + /// The number of blocks in the parent tenure parent_tenure_blocks: u64, + /// The consensus hash of the parent tenure parent_tenure_consensus_hash: ConsensusHash, } @@ -86,6 +98,33 @@ struct ParentStacksBlockInfo { parent_tenure: Option, } +/// The reason the miner thread was spawned +#[derive(PartialEq, Clone, Debug)] +pub enum MinerReason { + /// The miner thread was spawned to begin a new tenure + BlockFound, + /// The miner thread was spawned to extend an existing tenure + Extended { + /// Current consensus hash on the underlying burnchain. Corresponds to the last-seen + /// sortition. + burn_view_consensus_hash: ConsensusHash, + }, +} + +impl std::fmt::Display for MinerReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MinerReason::BlockFound => write!(f, "BlockFound"), + MinerReason::Extended { + burn_view_consensus_hash, + } => write!( + f, + "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}", + ), + } + } +} + pub struct BlockMinerThread { /// node config struct config: Config, @@ -100,11 +139,18 @@ pub struct BlockMinerThread { /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner + burn_election_block: BlockSnapshot, + /// Current burnchain tip burn_block: BlockSnapshot, /// The start of the parent tenure for this tenure parent_tenure_id: StacksBlockId, /// Handle to the node's event dispatcher event_dispatcher: EventDispatcher, + /// The reason the miner thread was spawned + reason: MinerReason, + /// Handle to the p2p thread for block broadcast + p2p_handle: NetworkHandle, + signer_set_cache: Option, } impl BlockMinerThread { @@ -112,8 +158,10 @@ impl BlockMinerThread { pub fn new( rt: &RelayerThread, registered_key: RegisteredKey, + burn_election_block: BlockSnapshot, burn_block: BlockSnapshot, parent_tenure_id: StacksBlockId, + reason: MinerReason, ) -> BlockMinerThread { BlockMinerThread { config: rt.config.clone(), @@ -122,22 +170,40 @@ impl BlockMinerThread { burnchain: rt.burnchain.clone(), mined_blocks: vec![], registered_key, + burn_election_block, burn_block, event_dispatcher: rt.event_dispatcher.clone(), parent_tenure_id, + reason, + p2p_handle: rt.get_p2p_handle(), + signer_set_cache: None, } } /// Stop a miner tenure by blocking the miner and then joining the tenure thread - pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + pub fn stop_miner( + 
globals: &Globals, + prior_miner: JoinHandle>, + ) -> Result<(), NakamotoNodeError> { globals.block_miner(); - prior_miner + let prior_miner_result = prior_miner .join() - .expect("FATAL: IO failure joining prior mining thread"); + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + if let Err(e) = prior_miner_result { + // it's okay if the prior miner thread exited with an error. + // in many cases this is expected (i.e., a burnchain block occurred) + // if some error condition should be handled though, this is the place + // to do that handling. + debug!("Prior mining thread exited with: {e:?}"); + } globals.unblock_miner(); + Ok(()) } - pub fn run_miner(mut self, prior_miner: Option>) { + pub fn run_miner( + mut self, + prior_miner: Option>>, + ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. // the new mining thread will join it (so that the new mining thread stalls, not the relayer) debug!( @@ -145,17 +211,48 @@ impl BlockMinerThread { "had_prior_miner" => prior_miner.is_some(), "parent_tenure_id" => %self.parent_tenure_id, "thread_id" => ?thread::current().id(), + "burn_block_consensus_hash" => %self.burn_block.consensus_hash, + "reason" => %self.reason, ); if let Some(prior_miner) = prior_miner { - Self::stop_miner(&self.globals, prior_miner); + Self::stop_miner(&self.globals, prior_miner)?; } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) - .expect("FATAL: failed to connect to stacker DB"); + .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; let mut attempts = 0; // now, actually run this tenure loop { let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. + if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = SortitionDB::open( + &burn_db_path, + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) => {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + continue; + } + Err(e) => { + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); + } + } + } match self.mine_block(&stackerdbs) { Ok(x) => break Some(x), Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { @@ -173,38 +270,55 @@ impl BlockMinerThread { } Err(e) => { warn!("Failed to mine block: {e:?}"); - return; + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } } }; if let Some(mut new_block) = new_block { - if let Err(e) = self.propose_block(&new_block, &stackerdbs) { - error!("Unrecoverable error while proposing block to signer set: {e:?}. 
Ending tenure."); - return; - } - - let (aggregate_public_key, signers_signature) = match self.coordinate_signature( - &new_block, - &mut stackerdbs, - &mut attempts, - ) { - Ok(x) => x, - Err(e) => { - error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure."); - return; + #[cfg(test)] + { + if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Broadcasting is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Broadcasting is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); } - }; + } - new_block.header.signer_signature = signers_signature; - if let Err(e) = self.broadcast(new_block.clone(), &aggregate_public_key) { + let (reward_set, signer_signature) = + match self.gather_signatures(&mut new_block, &mut stackerdbs, &mut attempts) { + Ok(x) => x, + Err(e) => { + error!( + "Error while gathering signatures: {e:?}. Will try mining again." + ); + continue; + } + }; + + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { info!( "Miner: Block signed by signer set and broadcasted"; "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_hash" => %new_block.header.block_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), "stacks_block_id" => %new_block.header.block_id(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, @@ -220,59 +334,71 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } - let sort_db = SortitionDB::open( + let Ok(sort_db) = SortitionDB::open( &self.config.get_burn_db_file_path(), true, self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); + ) else { + error!("Failed to open sortition DB. Will try mining again."); + continue; + }; + let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); if self.check_burn_tip_changed(&sort_db).is_err() { - return; + return Err(NakamotoNodeError::BurnchainTipChanged); } } } } - fn coordinate_signature( - &mut self, - new_block: &NakamotoBlock, - stackerdbs: &mut StackerDBs, - attempts: &mut u64, - ) -> Result<(Point, ThresholdSignature), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; + /// Load the signer set active for this miner's blocks. This is the + /// active reward set during `self.burn_election_block`. The miner + /// thread caches this information, and this method will consult + /// that cache (or populate it if necessary). 
+ fn load_signer_set(&mut self) -> Result { + if let Some(set) = self.signer_set_cache.as_ref() { + return Ok(set.clone()); + } let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! {e:?}" + )) + })?; + + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + + let burn_election_height = self.burn_election_block.block_height; + let reward_cycle = self .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .block_height_to_reward_cycle(burn_election_height) + .expect("FATAL: no reward cycle for sortition"); - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, + let reward_info = match load_nakamoto_reward_set( + reward_cycle, + &self.burn_election_block.sortition_id, + &self.burnchain, + &mut chain_state, + &self.parent_tenure_id, + &sort_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, Ok(None) => { return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), + "No reward set stored yet. Cannot mine!".into(), )); } Err(e) => { @@ -288,71 +414,17 @@ impl BlockMinerThread { )); }; - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); - let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( - &mut chain_state, - &sort_db, - &sortition_handle, - &new_block, - ) else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the active aggregate public key. Cannot mine!".into(), - )); - }; - - #[cfg(test)] - { - // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - if let Some(signature) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - return Ok((aggregate_public_key, signature)); - } - } - - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = SignCoordinator::new( - &reward_set, - reward_cycle, - miner_privkey_as_scalar, - aggregate_public_key, - &stackerdbs, - &self.config, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" - )) - })?; - - *attempts += 1; - let signature = coordinator.begin_sign( - new_block, - *attempts, - &tip, - &self.burnchain, - &sort_db, - &stackerdbs, - )?; - - Ok((aggregate_public_key, signature)) + self.signer_set_cache = Some(reward_set.clone()); + Ok(reward_set) } - fn propose_block( + /// Gather a list of signatures from the signers for the block + fn gather_signatures( &mut self, - new_block: &NakamotoBlock, - stackerdbs: &StackerDBs, - ) -> Result<(), NakamotoNodeError> { - let rpc_socket = self.config.node.get_rpc_loopback().ok_or_else(|| { - NakamotoNodeError::MinerConfigurationFailed("Could not parse RPC bind") - })?; - let miners_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_session = - StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id.clone()); + new_block: &mut NakamotoBlock, + stackerdbs: &mut StackerDBs, + attempts: &mut u64, + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", @@ -363,64 +435,57 @@ impl BlockMinerThread { true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! {e:?}" + )) + })?; + let tip = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &new_block.header.consensus_hash, ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; - let proposal_msg = BlockProposalSigners { - block: new_block.clone(), - burn_height: self.burn_block.block_height, - reward_cycle, - }; - let proposal = match NakamotoBlockBuilder::make_stackerdb_block_proposal( - &sort_db, - &tip, - &stackerdbs, - &proposal_msg, - &miner_privkey, - &miners_contract_id, - ) { - Ok(Some(chunk)) => chunk, - Ok(None) => { - warn!("Failed to propose block to stackerdb: no slot available"); - return Ok(()); - } - Err(e) => { - warn!("Failed to propose block to stackerdb: {e:?}"); - return Ok(()); - } - }; + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); + let reward_set = self.load_signer_set()?; - // Propose the block to the observing signers through the .miners stackerdb instance - match miners_session.put_chunk(&proposal) { - Ok(ack) => { - info!( - "Proposed block to stackerdb"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "ack_msg" => ?ack, - ); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to propose block to stackerdb {e:?}" - ))); - } + if self.config.get_node_config(false).mock_mining { + return Ok((reward_set, Vec::new())); } - self.globals.counters.bump_naka_proposed_blocks(); - Ok(()) + let mut coordinator = + SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( + |e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + 
"Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + }, + )?; + + *attempts += 1; + let signature = coordinator.begin_sign_v0( + new_block, + *attempts, + &tip, + &self.burnchain, + &sort_db, + &stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + )?; + + return Ok((reward_set, signature)); } fn get_stackerdb_contract_and_slots( @@ -481,12 +546,12 @@ impl BlockMinerThread { let signer_chunks = stackerdbs .get_latest_chunks(&signers_contract_id, &slot_ids) .expect("FATAL: could not get latest chunks from stacker DB"); - let signer_messages: Vec<(u32, SignerMessage)> = slot_ids + let signer_messages: Vec<(u32, SignerMessageV1)> = slot_ids .iter() .zip(signer_chunks.into_iter()) .filter_map(|(slot_id, chunk)| { chunk.and_then(|chunk| { - read_next::(&mut &chunk[..]) + read_next::(&mut &chunk[..]) .ok() .map(|msg| (*slot_id, msg)) }) @@ -503,26 +568,32 @@ impl BlockMinerThread { // Get all nonces for the signers from clarity DB to use to validate transactions let account_nonces = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }) + .with_read_only_clarity_tx( + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, + &stacks_block_id, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + addresses + .iter() + .map(|address| { + ( + address.clone(), + clarity_db + .get_account_nonce(&address.clone().into()) + .unwrap_or(0), + ) + }) + .collect::>() + }) + }, + ) .unwrap_or_default(); let mut filtered_transactions: HashMap = HashMap::new(); for (_slot, signer_message) in signer_messages { match signer_message { - SignerMessage::Transactions(transactions) => { + SignerMessageV1::Transactions(transactions) => { NakamotoSigners::update_filtered_transactions( &mut filtered_transactions, &account_nonces, @@ -536,14 +607,64 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// Store a block to the chainstate, and if successful (it should be since we mined it), + /// broadcast it via the p2p network. + fn broadcast_p2p( + &mut self, + sort_db: &SortitionDB, + chain_state: &mut StacksChainState, + block: &NakamotoBlock, + reward_set: RewardSet, + ) -> Result<(), ChainstateError> { + #[cfg(test)] + { + if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { + return Ok(()); + } + } + + let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; + let chainstate_config = chain_state.config(); + let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; + let accepted = NakamotoChainState::accept_block( + &chainstate_config, + &block, + &mut sortition_handle, + &staging_tx, + headers_conn, + reward_set, + NakamotoBlockObtainMethod::Mined, + )?; + staging_tx.commit()?; + + if !accepted { + warn!("Did NOT accept block {} we mined", &block.block_id()); + + // not much we can do here, but try and mine again and hope we produce a valid one. 
+ return Ok(()); + } + + // forward to p2p thread + let block_id = block.block_id(); + if let Err(e) = self.p2p_handle.broadcast_message( + vec![], + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![block.clone()], + }), + ) { + warn!("Failed to broadcast block {}: {:?}", &block_id, &e); + } + Ok(()) + } + fn broadcast( - &self, + &mut self, block: NakamotoBlock, - aggregate_public_key: &Point, - ) -> Result<(), ChainstateError> { + reward_set: RewardSet, + stackerdbs: &StackerDBs, + ) -> Result<(), NakamotoNodeError> { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let chainstate_config = chain_state.config(); let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -551,18 +672,43 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_tip(); - let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; - NakamotoChainState::accept_block( - &chainstate_config, - block, - &mut sortition_handle, - &staging_tx, - headers_conn, - &aggregate_public_key, - )?; - staging_tx.commit()?; - Ok(()) + if self.config.miner.mining_key.is_none() { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + + // push block via p2p block push + self.broadcast_p2p(&sort_db, &mut chain_state, &block, reward_set) + .map_err(NakamotoNodeError::AcceptFailure)?; + + let Some(ref miner_privkey) = self.config.miner.mining_key else { + // should be unreachable, but we can't borrow this above broadcast_p2p() since it's + // mutable + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + + // also, push block via stackerdb to make sure stackers get it + let rpc_socket = self.config.node.get_rpc_loopback().ok_or_else(|| { + NakamotoNodeError::MinerConfigurationFailed("Failed to get RPC loopback socket") + })?; + let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); + let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + + SignCoordinator::send_miners_message( + miner_privkey, + &sort_db, + &self.burn_block, + &stackerdbs, + SignerMessageV0::BlockPushed(block), + MinerSlotID::BlockPushed, + chain_state.mainnet, + &mut miners_session, + &self.burn_election_block.consensus_hash, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure) } /// Get the coinbase recipient address, if set in the config and if allowed in this epoch @@ -576,25 +722,13 @@ impl BlockMinerThread { } fn generate_tenure_change_tx( - &mut self, + &self, nonce: u64, - parent_block_id: StacksBlockId, - parent_tenure_consensus_hash: ConsensusHash, - parent_tenure_blocks: u64, - miner_pkh: Hash160, + payload: TenureChangePayload, ) -> Result { let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: self.burn_block.consensus_hash.clone(), - prev_tenure_consensus_hash: parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), - previous_tenure_end: parent_block_id, - previous_tenure_blocks: u32::try_from(parent_tenure_blocks) - .expect("FATAL: more than u32 blocks in a tenure"), - cause: TenureChangeCause::BlockFound, - pubkey_hash: miner_pkh, - }); + let tenure_change_tx_payload = 
TransactionPayload::TenureChange(payload); let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); @@ -617,7 +751,7 @@ impl BlockMinerThread { /// Create a coinbase transaction. fn generate_coinbase_tx( - &mut self, + &self, nonce: u64, epoch_id: StacksEpochId, vrf_proof: VRFProof, @@ -652,37 +786,111 @@ impl BlockMinerThread { tx_signer.get_tx().unwrap() } + // TODO: add tests from mutation testing results #4869 + #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. - /// If there's no parent because this is the first block, then return the genesis block's info. - /// If we can't find the parent in the DB but we expect one, return None. + /// If we can't find the parent in the DB but we expect one, return Err(ParentNotFound). fn load_block_parent_info( &self, burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Result { - let Some(stacks_tip) = - NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) - .expect("FATAL: could not query chain tip") - else { - debug!("No Stacks chain tip known, will return a genesis block"); - let burnchain_params = burnchain_params_from_config(&self.config.burnchain); - - let chain_tip = ChainTip::genesis( - &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), - burnchain_params.first_block_timestamp.into(), + // The nakamoto miner must always build off of a chain tip that is the highest of: + // 1. The highest block in the miner's current tenure + // 2. The highest block in the current tenure's parent tenure + // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. + let stacks_tip_header = if let Some(block) = self.mined_blocks.last() { + test_debug!( + "Stacks block parent ID is last mined block {}", + &block.block_id() + ); + let stacks_block_id = block.block_id(); + NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) + .map_err(|e| { + error!( + "Could not query header info for last-mined block ID {}: {:?}", + &stacks_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header for parent tenure ID {}", &stacks_block_id); + NakamotoNodeError::ParentNotFound + })? + } else { + // no mined blocks yet + test_debug!( + "Stacks block parent ID is last block in parent tenure ID {}", + &self.parent_tenure_id ); - return Ok(ParentStacksBlockInfo { - parent_tenure: Some(ParentTenureInfo { - parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, - parent_tenure_blocks: 0, - }), - stacks_parent_header: chain_tip.metadata, - coinbase_nonce: 0, - }); + // find the last block in the parent tenure, since this is the tip we'll build atop + let parent_tenure_header = + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header for parent tenure ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header for parent tenure ID {}", &self.parent_tenure_id); + NakamotoNodeError::ParentNotFound + })?; + + // NOTE: this is the soon-to-be parent's block ID, since it's the tip we mine on top + // of. We're only interested in performing queries relative to the canonical tip. 
+ let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { + error!("Failed to load canonical Stacks tip: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let header_opt = NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip, + &parent_tenure_header.consensus_hash, + ) + .map_err(|e| { + error!("Could not query parent tenure finish block: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + if let Some(header) = header_opt { + header + } else { + // this is an epoch2 block + debug!( + "Stacks block parent ID may be an epoch2x block: {}", + &self.parent_tenure_id + ); + let epoch2_header = + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })?; + + epoch2_header + } }; + test_debug!( + "Miner: stacks tip parent header is {} {:?}", + &stacks_tip_header.index_block_hash(), + &stacks_tip_header + ); let miner_address = self .keychain .origin_address(self.config.is_mainnet()) @@ -693,7 +901,7 @@ impl BlockMinerThread { &self.burn_block, miner_address, &self.parent_tenure_id, - stacks_tip, + stacks_tip_header, ) { Ok(parent_info) => Ok(parent_info), Err(NakamotoNodeError::BurnchainTipChanged) => { @@ -710,7 +918,7 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key - let vrf_proof = if self.config.node.mock_mining { + let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, self.burn_block.sortition_hash.as_bytes(), @@ -733,12 +941,15 @@ impl BlockMinerThread { Some(vrf_proof) } + // TODO: add tests from mutation testing results #4869 + #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. fn mine_block(&mut self, stackerdbs: &StackerDBs) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); + let reward_set = self.load_signer_set()?; // NOTE: read-write access is needed in order to be able to query the recipient set. 
// This is an artifact of the way the MARF is built (see #1449) @@ -767,66 +978,44 @@ impl BlockMinerThread { .make_vrf_proof() .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; - if self.mined_blocks.is_empty() { - if parent_block_info.parent_tenure.is_none() { - warn!( - "Miner should be starting a new tenure, but failed to load parent tenure info" - ); - return Err(NakamotoNodeError::ParentNotFound); - } - } + if self.mined_blocks.is_empty() && parent_block_info.parent_tenure.is_none() { + warn!("Miner should be starting a new tenure, but failed to load parent tenure info"); + return Err(NakamotoNodeError::ParentNotFound); + }; // create our coinbase if this is the first block we've mined this tenure - let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); - let current_miner_nonce = parent_block_info.coinbase_nonce; - let tenure_change_tx = self.generate_tenure_change_tx( - current_miner_nonce, - parent_block_id, - par_tenure_info.parent_tenure_consensus_hash, - par_tenure_info.parent_tenure_blocks, - self.keychain.get_nakamoto_pkh(), - )?; - let coinbase_tx = - self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); - NakamotoTenureInfo { - coinbase_tx: Some(coinbase_tx), - tenure_change_tx: Some(tenure_change_tx), - } - } else { - NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - } - }; + let tenure_start_info = self.make_tenure_start_info( + &chain_state, + &parent_block_info, + vrf_proof, + target_epoch_id, + )?; parent_block_info.stacks_parent_header.microblock_tail = None; - let block_num = u64::try_from(self.mined_blocks.len()) - .map_err(|_| NakamotoNodeError::UnexpectedChainState)? 
- .saturating_add(1); - let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; + let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); + // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - &burn_db.index_conn(), + &burn_db + .index_handle_at_ch(&self.burn_block.consensus_hash) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, - &self.burn_block.consensus_hash, + &self.burn_election_block.consensus_hash, self.burn_block.total_burn, tenure_start_info, - self.config.make_block_builder_settings( - block_num, - false, - self.globals.get_miner_status(), - ), + self.config + .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), // we'll invoke the event dispatcher ourselves so that it calculates the // correct signer_sighash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), signer_transactions, + signer_bitvec_len.unwrap_or(0), ) .map_err(|e| { if !matches!( @@ -843,7 +1032,6 @@ impl BlockMinerThread { ChainstateError::NoTransactionsToMine, )); } - let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key .sign(block.header.miner_signature_hash().as_bytes()) @@ -856,6 +1044,9 @@ impl BlockMinerThread { block.header.block_hash(), block.txs.len(); "signer_sighash" => %block.header.signer_signature_hash(), + "consensus_hash" => %block.header.consensus_hash, + "parent_block_id" => %block.header.parent_block_id, + "timestamp" => block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( @@ -868,11 +1059,73 @@ impl BlockMinerThread { // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all - // Stacks blocks with heights higher than the canoincal tip are processed. + // Stacks blocks with heights higher than the canonical tip are processed. 
self.check_burn_tip_changed(&burn_db)?; Ok(block) } + /// Create the tenure start info for the block we're going to build + fn make_tenure_start_info( + &self, + chainstate: &StacksChainState, + parent_block_info: &ParentStacksBlockInfo, + vrf_proof: VRFProof, + target_epoch_id: StacksEpochId, + ) -> Result { + let current_miner_nonce = parent_block_info.coinbase_nonce; + let Some(parent_tenure_info) = &parent_block_info.parent_tenure else { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + }; + + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + let mut payload = TenureChangePayload { + tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), + prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, + burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: self.keychain.get_nakamoto_pkh(), + }; + + let (tenure_change_tx, coinbase_tx) = match &self.reason { + MinerReason::BlockFound => { + let tenure_change_tx = + self.generate_tenure_change_tx(current_miner_nonce, payload)?; + let coinbase_tx = + self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); + (Some(tenure_change_tx), Some(coinbase_tx)) + } + MinerReason::Extended { + burn_view_consensus_hash, + } => { + let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( + chainstate.db(), + &parent_block_id, + ) + .map_err(NakamotoNodeError::MiningFailure)?; + debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + payload = payload.extend( + *burn_view_consensus_hash, + parent_block_id, + num_blocks_so_far, + ); + let tenure_change_tx = + self.generate_tenure_change_tx(current_miner_nonce, payload)?; + (Some(tenure_change_tx), None) + } + }; + + Ok(NakamotoTenureInfo { + coinbase_tx, + tenure_change_tx, + }) + } + /// Check if the tenure needs to change -- if so, return a BurnchainTipChanged error fn check_burn_tip_changed(&self, sortdb: &SortitionDB) -> Result<(), NakamotoNodeError> { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) @@ -889,6 +1142,8 @@ impl BlockMinerThread { } impl ParentStacksBlockInfo { + // TODO: add tests from mutation testing results #4869 + #[cfg_attr(test, mutants::skip)] /// Determine where in the set of forks to attempt to mine the next anchored block. /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. 
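`make_tenure_start_info()` above keys the tenure-start transactions off `MinerReason`: a `BlockFound` tenure issues both a tenure-change and a coinbase, while an `Extended` tenure issues only a tenure-extend and earns no new coinbase. A self-contained sketch of that rule, with the enum simplified to drop its payloads:

```rust
// Simplified MinerReason; the real Extended variant carries the
// burn-view consensus hash used to build the tenure-extend payload.
enum MinerReason {
    BlockFound,
    Extended,
}

/// Returns (has_tenure_change_tx, has_coinbase_tx), matching the
/// match arms in make_tenure_start_info().
fn txs_for_reason(reason: &MinerReason) -> (bool, bool) {
    match reason {
        MinerReason::BlockFound => (true, true),
        MinerReason::Extended => (true, false), // extend, but no new coinbase
    }
}

fn main() {
    assert_eq!(txs_for_reason(&MinerReason::BlockFound), (true, true));
    assert_eq!(txs_for_reason(&MinerReason::Extended), (true, false));
}
```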
@@ -938,14 +1193,16 @@ impl ParentStacksBlockInfo { let parent_tenure_info = if stacks_tip_header.consensus_hash == parent_tenure_header.consensus_hash { + // in the same tenure let parent_tenure_blocks = if parent_tenure_header .anchored_header .as_stacks_nakamoto() .is_some() { let Ok(Some(last_parent_tenure_header)) = - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - chain_state.db(), + NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip_header.index_block_hash(), &parent_tenure_header.consensus_hash, ) else { @@ -956,6 +1213,9 @@ impl ParentStacksBlockInfo { if stacks_tip_header.index_block_hash() != last_parent_tenure_header.index_block_hash() { + warn!("Last known tenure block of parent tenure should be the stacks tip"; + "stacks_tip_header" => %stacks_tip_header.index_block_hash(), + "last_parent_tenure_header" => %last_parent_tenure_header.index_block_hash()); return Err(NakamotoNodeError::NewParentDiscovered); } 1 + last_parent_tenure_header.stacks_block_height @@ -990,7 +1250,9 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db + .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index eeb6789d30c..dc060e06b6d 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -182,8 +182,13 @@ impl PeerThread { .parse() .unwrap_or_else(|_| panic!("Failed to parse socket: {}", &config.node.rpc_bind)); - net.bind(&p2p_sock, &rpc_sock) - .expect("BUG: PeerNetwork could not bind or is already bound"); + let did_bind = net + .try_bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind"); + + if !did_bind { + info!("`PeerNetwork::bind()` skipped, already bound"); + } let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index f638ae93241..21e050269e7 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -15,6 +15,8 @@ // along with this program. If not, see . 
use core::fmt; use std::collections::HashSet; +use std::fs; +use std::io::Read; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -36,21 +38,21 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::{ - FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, -}; +use stacks::core::STACKS_EPOCH_3_0_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; +use stacks::net::p2p::NetworkHandle; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; -use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; +use stacks_common::util::vrf::VRFPublicKey; +use super::miner::MinerReason; use super::{ BlockCommits, Config, Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, @@ -93,6 +95,82 @@ impl fmt::Display for RelayerDirective { } } +/// Last commitment data +/// This represents the tenure that the last-sent block-commit committed to. +pub struct LastCommit { + /// block-commit sent + block_commit: LeaderBlockCommitOp, + /// the sortition tip at the time the block-commit was sent + burn_tip: BlockSnapshot, + /// the stacks tip at the time the block-commit was sent + stacks_tip: StacksBlockId, + /// the tenure consensus hash for the tip's tenure + tenure_consensus_hash: ConsensusHash, + /// the start-block hash of the tip's tenure + #[allow(dead_code)] + start_block_hash: BlockHeaderHash, + /// What is the epoch in which this was sent? + epoch_id: StacksEpochId, + /// commit txid (to be filled in on submission) + txid: Option, +} + +impl LastCommit { + pub fn new( + commit: LeaderBlockCommitOp, + burn_tip: BlockSnapshot, + stacks_tip: StacksBlockId, + tenure_consensus_hash: ConsensusHash, + start_block_hash: BlockHeaderHash, + epoch_id: StacksEpochId, + ) -> Self { + Self { + block_commit: commit, + burn_tip, + stacks_tip, + tenure_consensus_hash, + start_block_hash, + epoch_id, + txid: None, + } + } + + /// Get the commit + pub fn get_block_commit(&self) -> &LeaderBlockCommitOp { + &self.block_commit + } + + /// What's the parent tenure's tenure-start block hash? + pub fn parent_tenure_id(&self) -> StacksBlockId { + StacksBlockId(self.block_commit.block_header_hash.clone().0) + } + + /// What's the stacks tip at the time of commit? + pub fn get_stacks_tip(&self) -> &StacksBlockId { + &self.stacks_tip + } + + /// What's the burn tip at the time of commit? + pub fn get_burn_tip(&self) -> &BlockSnapshot { + &self.burn_tip + } + + /// What's the epoch in which this was sent? 
+ pub fn get_epoch_id(&self) -> &StacksEpochId { + &self.epoch_id + } + + /// Get the tenure ID of the tenure this commit builds on + pub fn get_tenure_id(&self) -> &ConsensusHash { + &self.tenure_consensus_hash + } + + /// Set our txid + pub fn set_txid(&mut self, txid: &Txid) { + self.txid = Some(txid.clone()); + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -149,14 +227,14 @@ pub struct RelayerThread { relayer: Relayer, /// handle to the subordinate miner thread - miner_thread: Option>, + miner_thread: Option>>, /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up /// to check if it should issue a block commit or try to register a VRF key next_initiative: Instant, is_miner: bool, - /// This is the last snapshot in which the relayer committed, and the parent_tenure_id - /// which was committed to - last_committed: Option<(BlockSnapshot, StacksBlockId)>, + /// Information about the last-sent block commit, and the relayer's view of the chain at the + /// time it was sent. + last_committed: Option, } impl RelayerThread { @@ -185,8 +263,10 @@ impl RelayerThread { let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); + let next_initiative_delay = config.node.next_initiative_delay; + RelayerThread { - config: config, + config, sortdb, chainstate, mempool, @@ -210,11 +290,16 @@ impl RelayerThread { miner_thread: None, is_miner, - next_initiative: Instant::now() + Duration::from_secs(10), + next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, } } + /// Get a handle to the p2p thread + pub fn get_p2p_handle(&self) -> NetworkHandle { + self.relayer.get_p2p_handle() + } + /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? fn has_waited_for_latest_blocks(&self) -> bool { @@ -251,6 +336,7 @@ impl RelayerThread { .process_network_result( &self.local_peer, &mut net_result, + &self.burnchain, &mut self.sortdb, &mut self.chainstate, &mut self.mempool, @@ -291,13 +377,13 @@ impl RelayerThread { /// Given the pointer to a recently processed sortition, see if we won the sortition. /// - /// Returns `true` if we won this last sortition. + /// Returns a directive to the relayer thread to either start, stop, or continue a tenure. pub fn process_sortition( &mut self, consensus_hash: ConsensusHash, burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, - ) -> MinerDirective { + ) -> Result { let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown consensus hash"); @@ -320,8 +406,8 @@ impl RelayerThread { increment_stx_blocks_mined_counter(); } - if sn.sortition { - if won_sortition { + let directive = if sn.sortition { + if won_sortition || self.config.get_node_config(false).mock_mining { MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, @@ -333,7 +419,8 @@ impl RelayerThread { MinerDirective::ContinueTenure { new_burn_view: consensus_hash, } - } + }; + Ok(directive) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params @@ -390,133 +477,186 @@ impl RelayerThread { } } - /// Produce the block-commit for this anchored block, if we can. 
- /// `target_ch` is the consensus-hash of the Tenure we will build off - /// `target_bh` is the block hash of the Tenure we will build off - /// Returns the (the most recent burn snapshot, the expected epoch, the commit-op) on success + /// Produce the block-commit for this upcoming tenure, if we can. + /// + /// Takes the Nakamoto chain tip (consensus hash, block header hash). + /// + /// Returns the most recent burn snapshot, the most recent Stacks tip, and the commit-op on success /// Returns None if we fail somehow. - fn make_block_commit( + /// + /// TODO: unit test + pub(crate) fn make_block_commit( &mut self, - target_ch: &ConsensusHash, - target_bh: &BlockHeaderHash, - ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { + tip_block_ch: &ConsensusHash, + tip_block_bh: &BlockHeaderHash, + ) -> Result<LastCommit, NakamotoNodeError> { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; - let parent_vrf_proof = - NakamotoChainState::get_block_vrf_proof(self.chainstate.db(), &target_ch) - .map_err(|_e| NakamotoNodeError::ParentNotFound)? - .unwrap_or_else(|| VRFProof::empty()); + let stacks_tip = StacksBlockId::new(tip_block_ch, tip_block_bh); + + // sanity check -- this block must exist and have been processed locally + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &stacks_tip, + &tip_block_ch, + ) + .map_err(|e| { + error!( + "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", + &stacks_tip, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {}", + &stacks_tip + ); + NakamotoNodeError::ParentNotFound + })?; + + // load the VRF proof generated in this tenure, so we can use it to seed the VRF in the + // upcoming tenure. This may be an epoch2x VRF proof. + let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof( + &mut self.chainstate.index_conn(), + &stacks_tip, + tip_block_ch, + ) + .map_err(|e| { + error!( + "Failed to load VRF proof for {} off of {}: {:?}", + tip_block_ch, &stacks_tip, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No block VRF proof for {} off of {}", + tip_block_ch, &stacks_tip + ); + NakamotoNodeError::ParentNotFound + })?; // let's figure out the recipient set! - let recipients = get_nakamoto_next_recipients(&sort_tip, &mut self.sortdb, &self.burnchain) - .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - let block_header = - NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) - .map_err(|e| { - error!("Relayer: Failed to get block header for parent tenure: {e:?}"); - NakamotoNodeError::ParentNotFound - })?
- .ok_or_else(|| { - error!("Relayer: Failed to find block header for parent tenure"); - NakamotoNodeError::ParentNotFound - })?; - - let parent_block_id = block_header.index_block_hash(); - if parent_block_id != StacksBlockId::new(target_ch, target_bh) { - error!("Relayer: Found block header for parent tenure, but mismatched block id"; - "expected_block_id" => %StacksBlockId::new(target_ch, target_bh), - "found_block_id" => %parent_block_id); - return Err(NakamotoNodeError::UnexpectedChainState); - } - - let Ok(Some(parent_sortition)) = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), target_ch) - else { - error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); - return Err(NakamotoNodeError::ParentNotFound); + let recipients = get_nakamoto_next_recipients( + &sort_tip, + &mut self.sortdb, + &mut self.chainstate, + &stacks_tip, + &self.burnchain, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let commit_outs = if self + .burnchain + .is_in_prepare_phase(sort_tip.block_height + 1) + { + vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + } else { + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) }; - let Ok(Some(target_epoch)) = - SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) + // find the sortition that kicked off this tenure (it may be different from the sortition + // tip, such as when there is no sortition or when the miner of the current sortition never + // produces a block). This is used to find the parent block-commit of the block-commit + // we'll submit. + let Ok(Some(tip_tenure_sortition)) = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), tip_block_ch) else { - error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); - return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + error!("Relayer: Failed to lookup the block snapshot of highest tenure ID"; "tenure_consensus_hash" => %tip_block_ch); + return Err(NakamotoNodeError::ParentNotFound); }; - let parent_block_burn_height = parent_sortition.block_height; + // find the parent block-commit of this commit + let commit_parent_block_burn_height = tip_tenure_sortition.block_height; let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( self.sortdb.conn(), - &parent_sortition.winning_block_txid, - &parent_sortition.sortition_id, + &tip_tenure_sortition.winning_block_txid, + &tip_tenure_sortition.sortition_id, ) else { - error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %tip_block_ch); return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - let parent_winning_vtxindex = parent_winning_tx.vtxindex; + let commit_parent_winning_vtxindex = parent_winning_tx.vtxindex; - let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); - let sunset_burn = self.burnchain.expected_sunset_burn( - sort_tip.block_height + 1, - burn_fee_cap, - target_epoch.epoch_id, - ); - let rest_commit = burn_fee_cap - sunset_burn; - - let commit_outs = if !self - .burnchain - .pox_constants - .is_after_pox_sunset_end(sort_tip.block_height, target_epoch.epoch_id) - && !self - .burnchain - .is_in_prepare_phase(sort_tip.block_height + 1) - { - 
RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) - } else { - vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + // epoch in which this commit will be sent (affects how the burnchain client processes it) + let Ok(Some(target_epoch)) = + SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) + else { + error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - // let's commit, but target the current burnchain tip with our modulus + // amount of burnchain tokens (e.g. sats) we'll spend across the PoX outputs + let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); + + // let's commit, but target the current burnchain tip with our modulus so the commit is + // only valid if it lands in the targeted burnchain block height let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS) .map_err(|_| { error!("Relayer: Block mining modulus is not u8"); NakamotoNodeError::UnexpectedChainState })?; + + // burnchain signer for this commit let sender = self.keychain.get_burnchain_signer(); + + // VRF key this commit uses (i.e. the one we registered) let key = self .globals .get_leader_key_registration_state() .get_active() .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; - let op = LeaderBlockCommitOp { - sunset_burn, - block_header_hash: BlockHeaderHash(parent_block_id.0), - burn_fee: rest_commit, - input: (Txid([0; 32]), 0), + + let commit = LeaderBlockCommitOp { + // NOTE: to be filled in + treatment: vec![], + // NOTE: PoX sunset has been disabled prior to taking effect + sunset_burn: 0, + // block-commits in Nakamoto commit to the ongoing tenure's tenure-start block (which, + // when processed, become the start-block of the tenure atop which this miner will + // produce blocks) + block_header_hash: BlockHeaderHash( + highest_tenure_start_block_header.index_block_hash().0, + ), + // the rest of this is the same as epoch2x commits, modulo the new epoch marker + burn_fee: burn_fee_cap, apparent_sender: sender, key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), memo: vec![STACKS_EPOCH_3_0_MARKER], - new_seed: VRFSeed::from_proof(&parent_vrf_proof), - parent_block_ptr: u32::try_from(parent_block_burn_height) + new_seed: VRFSeed::from_proof(&tip_vrf_proof), + parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), - parent_vtxindex: u16::try_from(parent_winning_vtxindex) + parent_vtxindex: u16::try_from(commit_parent_winning_vtxindex) .expect("FATAL: vtxindex exceeded u16"), + burn_parent_modulus, + commit_outs, + + // NOTE: to be filled in + input: (Txid([0; 32]), 0), vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, burn_header_hash: BurnchainHeaderHash::zero(), - burn_parent_modulus, - commit_outs, }; - Ok((sort_tip, target_epoch.epoch_id, op)) + Ok(LastCommit::new( + commit, + sort_tip, + stacks_tip, + highest_tenure_start_block_header.consensus_hash, + highest_tenure_start_block_header + .anchored_header + .block_hash(), + target_epoch.epoch_id, + )) } /// Create the block miner thread state. 
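One detail of the commit construction above: per the comment in the patch, the commit is only valid if it lands in the targeted burnchain block height, which is enforced by recording the sortition tip height modulo `BURN_BLOCK_MINED_AT_MODULUS`. A small sketch of that computation (the modulus value of 5 is assumed here, not taken from this patch):

```rust
// Assumed value; in stacks-core this constant lives with the burnchain ops.
const BURN_BLOCK_MINED_AT_MODULUS: u64 = 5;

/// The commit records the sortition tip height's residue; a commit that is
/// delayed and mined into a later burnchain block no longer matches the
/// residue it targeted.
fn burn_parent_modulus(sort_tip_height: u64) -> u8 {
    u8::try_from(sort_tip_height % BURN_BLOCK_MINED_AT_MODULUS)
        .expect("residue always fits in a u8")
}

fn main() {
    // a commit built when the sortition tip is at height 1234
    assert_eq!(burn_parent_modulus(1234), 4); // 1234 % 5 == 4
}
```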
@@ -532,18 +672,20 @@ impl RelayerThread { fn create_block_miner( &mut self, registered_key: RegisteredKey, - last_burn_block: BlockSnapshot, + burn_election_block: BlockSnapshot, + burn_tip: BlockSnapshot, parent_tenure_id: StacksBlockId, + reason: MinerReason, ) -> Result<BlockMinerThread, NakamotoNodeError> { - if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { + if fault_injection_skip_mining(&self.config.node.rpc_bind, burn_tip.block_height) { debug!( "Relayer: fault injection skip mining at block height {}", - last_burn_block.block_height + burn_tip.block_height ); return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash.clone(); let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); @@ -560,20 +702,31 @@ impl RelayerThread { debug!( "Relayer: Spawn tenure thread"; - "height" => last_burn_block.block_height, + "height" => burn_tip.block_height, "burn_header_hash" => %burn_header_hash, "parent_tenure_id" => %parent_tenure_id, + "reason" => %reason, + "burn_election_block.consensus_hash" => %burn_election_block.consensus_hash, + "burn_tip.consensus_hash" => %burn_tip.consensus_hash, ); - let miner_thread_state = - BlockMinerThread::new(self, registered_key, last_burn_block, parent_tenure_id); + let miner_thread_state = BlockMinerThread::new( + self, + registered_key, + burn_election_block, + burn_tip, + parent_tenure_id, + reason, + ); Ok(miner_thread_state) } fn start_new_tenure( &mut self, parent_tenure_start: StacksBlockId, + block_election_snapshot: BlockSnapshot, burn_tip: BlockSnapshot, + reason: MinerReason, ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. // the new mining thread will join it (so that the new mining thread stalls, not the relayer) @@ -586,7 +739,13 @@ impl RelayerThread { warn!("Trying to start new tenure, but no VRF key active"); NakamotoNodeError::NoVRFKeyActive })?; - let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; + let new_miner_state = self.create_block_miner( + vrf_key, + block_election_snapshot, + burn_tip, + parent_tenure_start, + reason, + )?; let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}")) @@ -628,37 +787,124 @@ impl RelayerThread { Ok(()) } + fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { + if let Err(e) = self.stop_tenure() { + error!("Relayer: Failed to stop tenure: {e:?}"); + return Ok(()); + } + debug!("Relayer: successfully stopped tenure."); + // Check if we should undergo a tenure change to switch to the new burn view + let burn_tip = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for new burn view: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?
+ .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for new burn view"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); + let canonical_stacks_tip = + StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); + let block_election_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let Some(ref mining_key) = self.config.miner.mining_key else { + return Ok(()); + }; + let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); + + let last_winner_snapshot = { + let ih = self.sortdb.index_handle(&burn_tip.sortition_id); + ih.get_last_snapshot_with_sortition(burn_tip.block_height) + .map_err(|e| { + error!("Relayer: failed to get last snapshot with sortition: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + }; + + let won_last_sortition = last_winner_snapshot.miner_pk_hash == Some(mining_pkh); + debug!( + "Relayer: Current burn block had no sortition. Checking for tenure continuation."; + "won_last_sortition" => won_last_sortition, + "current_mining_pkh" => %mining_pkh, + "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, + "canonical_stacks_tip_id" => %canonical_stacks_tip, + "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, + "block_election_ch" => %block_election_snapshot.consensus_hash, + "burn_view_ch" => %new_burn_view, + ); + + if !won_last_sortition { + return Ok(()); + } + + match self.start_new_tenure( + canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip + block_election_snapshot, + burn_tip, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) { + Ok(()) => { + debug!("Relayer: successfully started new tenure."); + } + Err(e) => { + error!("Relayer: Failed to start new tenure: {e:?}"); + } + } + Ok(()) + } + fn handle_sortition( &mut self, consensus_hash: ConsensusHash, burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let miner_instruction = - self.process_sortition(consensus_hash, burn_hash, committed_index_hash); + let Ok(miner_instruction) = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash) + else { + return false; + }; match miner_instruction { MinerDirective::BeginTenure { parent_tenure_start, burnchain_tip, - } => match self.start_new_tenure(parent_tenure_start, burnchain_tip) { + } => match self.start_new_tenure( + parent_tenure_start, + burnchain_tip.clone(), + burnchain_tip, + MinerReason::BlockFound, + ) { Ok(()) => { debug!("Relayer: successfully started new tenure."); } Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); + error!("Relayer: Failed to start new tenure: {e:?}"); } }, - MinerDirective::ContinueTenure { new_burn_view: _ } => { - // TODO: in this case, we eventually want to undergo a tenure - // change to switch to the new burn view, but right now, we will - // simply end our current tenure if it exists - match self.stop_tenure() { + MinerDirective::ContinueTenure { new_burn_view } => { + match self.continue_tenure(new_burn_view) 
{ Ok(()) => { - debug!("Relayer: successfully stopped tenure."); + debug!("Relayer: successfully handled continue tenure."); } Err(e) => { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to continue tenure: {e:?}"); + return false; } } } @@ -667,7 +913,7 @@ impl RelayerThread { debug!("Relayer: successfully stopped tenure."); } Err(e) => { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to stop tenure: {e:?}"); } }, } @@ -675,19 +921,38 @@ impl RelayerThread { true } + /// Generate and submit the next block-commit, and record it locally fn issue_block_commit( &mut self, - tenure_start_ch: ConsensusHash, - tenure_start_bh: BlockHeaderHash, + tip_block_ch: ConsensusHash, + tip_block_bh: BlockHeaderHash, ) -> Result<(), NakamotoNodeError> { - let (last_committed_at, target_epoch_id, commit) = - self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; + #[cfg(test)] + { + if self + .globals + .counters + .naka_skip_commit_op + .0 + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); + return Ok(()); + } + } + + // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let txid = self .bitcoin_controller .submit_operation( - target_epoch_id, - BlockstackOperationType::LeaderBlockCommit(commit), + last_committed.get_epoch_id().clone(), + BlockstackOperationType::LeaderBlockCommit( + last_committed.get_block_commit().clone(), + ), &mut op_signer, 1, ) @@ -695,107 +960,102 @@ impl RelayerThread { warn!("Failed to submit block-commit bitcoin transaction"); NakamotoNodeError::BurnchainSubmissionFailed })?; + info!( "Relayer: Submitted block-commit"; - "parent_consensus_hash" => %tenure_start_ch, - "parent_block_hash" => %tenure_start_bh, + "tip_consensus_hash" => %tip_block_ch, + "tip_block_hash" => %tip_block_bh, "txid" => %txid, ); + // update local state + last_committed.set_txid(&txid); self.last_commits.insert(txid); - self.last_committed = Some(( - last_committed_at, - StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), - )); + self.last_committed = Some(last_committed); self.globals.counters.bump_naka_submitted_commits(); Ok(()) } + /// Determine what the relayer should do to advance the chain. + /// * If this isn't a miner, then it's always nothing. + /// * Otherwise, if we haven't done so already, go register a VRF public key fn initiative(&mut self) -> Option<RelayerDirective> { if !self.is_miner { return None; } - // do we need a VRF key registration? - if matches!( - self.globals.get_leader_key_registration_state(), - LeaderKeyRegistrationState::Inactive - ) { - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { - warn!("Failed to fetch sortition tip while needing to register VRF key"); + match self.globals.get_leader_key_registration_state() { + // do we need a VRF key registration? + LeaderKeyRegistrationState::Inactive => { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + else { + warn!("Failed to fetch sortition tip while needing to register VRF key"); + return None; + }; + return Some(RelayerDirective::RegisterKey(sort_tip)); + } + // are we still waiting on a pending registration? + LeaderKeyRegistrationState::Pending(..) => { return None; - }; - return Some(RelayerDirective::RegisterKey(sort_tip)); - } - - // are we still waiting on a pending registration?
- if !matches!( - self.globals.get_leader_key_registration_state(), - LeaderKeyRegistrationState::Active(_) - ) { - return None; - } - - // has there been a new sortition - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { - return None; - }; - - // check if the burnchain changed, if so, we should issue a commit. - // if not, we may still want to update a commit if we've received a new tenure start block - let burnchain_changed = if let Some((last_committed_at, ..)) = self.last_committed.as_ref() - { - // if the new sortition tip has a different consesus hash than the last commit, - // issue a new commit - sort_tip.consensus_hash != last_committed_at.consensus_hash - } else { - // if there was no last commit, issue a new commit - true + } + LeaderKeyRegistrationState::Active(_) => {} }; - let Ok(Some(chain_tip_header)) = - NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + // load up canonical sortition and stacks tips + let Ok(sort_tip) = + SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { + error!("Failed to load canonical sortition tip: {:?}", &e); + e + }) else { - info!("No known canonical tip, will issue a genesis block commit"); - return Some(RelayerDirective::IssueBlockCommit( - FIRST_BURNCHAIN_CONSENSUS_HASH, - FIRST_STACKS_BLOCK_HASH, - )); + return None; }; - // get the starting block of the chain tip's tenure - let Ok(Some(chain_tip_tenure_start)) = - NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate.db(), - &chain_tip_header.consensus_hash, - ) + // NOTE: this may be an epoch2x tip + let Ok((stacks_tip_ch, stacks_tip_bh)) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { + error!("Failed to load canonical stacks tip: {:?}", &e); + e + }) else { - warn!("Failure getting the first block of tenure in order to assemble block commit"; - "tenure_consensus_hash" => %chain_tip_header.consensus_hash, - "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); return None; }; - - let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); - let should_commit = burnchain_changed - || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { - // if the tenure ID of the chain tip has changed, issue a new commit - last_committed_tenure_id != &chain_tip_tenure_id - } else { - // should be unreachable, but either way, if - // `self.last_committed` is None, we should issue a commit - true - }; - - if should_commit { - Some(RelayerDirective::IssueBlockCommit( - chain_tip_tenure_start.consensus_hash, - chain_tip_tenure_start.anchored_header.block_hash(), - )) - } else { - None + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + + // check stacks and sortition tips to see if any chainstate change has happened. + // did our view of the sortition history change? + // if so, then let's try and confirm the highest tenure so far. 
+ let burnchain_changed = self + .last_committed + .as_ref() + .map(|cmt| cmt.get_burn_tip().consensus_hash != sort_tip.consensus_hash) + .unwrap_or(true); + + let highest_tenure_changed = self + .last_committed + .as_ref() + .map(|cmt| cmt.get_tenure_id() != &stacks_tip_ch) + .unwrap_or(true); + + debug!("Relayer: initiative to commit"; + "sortition tip" => %sort_tip.consensus_hash, + "stacks tip" => %stacks_tip, + "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()), + "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()), + "burnchain view changed?" => %burnchain_changed, + "highest tenure changed?" => %highest_tenure_changed); + + if !burnchain_changed && !highest_tenure_changed { + // nothing to do + return None; } + + // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit + Some(RelayerDirective::IssueBlockCommit( + stacks_tip_ch, + stacks_tip_bh, + )) } /// Main loop of the relayer. @@ -804,10 +1064,12 @@ impl RelayerThread { pub fn main(mut self, relay_rcv: Receiver<RelayerDirective>) { debug!("relayer thread ID is {:?}", std::thread::current().id()); - self.next_initiative = Instant::now() + Duration::from_secs(10); + self.next_initiative = + Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); while self.globals.keep_running() { let directive = if Instant::now() >= self.next_initiative { - self.next_initiative = Instant::now() + Duration::from_secs(10); + self.next_initiative = + Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() } else { None @@ -843,6 +1105,43 @@ impl RelayerThread { debug!("Relayer exit!"); } + /// Try loading up a saved VRF key + pub(crate) fn load_saved_vrf_key(path: &str, pubkey_hash: &Hash160) -> Option<RegisteredKey> { + let mut f = match fs::File::open(path) { + Ok(f) => f, + Err(e) => { + warn!("Could not open {}: {:?}", &path, &e); + return None; + } + }; + let mut registered_key_bytes = vec![]; + if let Err(e) = f.read_to_end(&mut registered_key_bytes) { + warn!( + "Failed to read registered key bytes from {}: {:?}", + path, &e + ); + return None; + } + + let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes) + else { + warn!( + "Did not load registered key from {}: could not decode JSON", + &path + ); + return None; + }; + + // Check that the loaded key's memo matches the current miner's key + if registered_key.memo != pubkey_hash.as_ref() { + warn!("Loaded VRF key does not match mining key"); + return None; + } + + info!("Loaded registered key from {}", &path); + Some(registered_key) + } + /// Top-level dispatcher pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { debug!("Relayer: handling directive"; "directive" => %directive); @@ -861,7 +1160,18 @@ impl RelayerThread { info!("In initial block download, will not submit VRF registration"); return true; } - self.rotate_vrf_and_register(&last_burn_block); + let mut saved_key_opt = None; + if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { + saved_key_opt = + Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); + } + if let Some(saved_key) = saved_key_opt { + debug!("Relayer: resuming VRF key"); + self.globals.resume_leader_key(saved_key); + } else { + self.rotate_vrf_and_register(&last_burn_block); + debug!("Relayer: directive Registered VRF key"); + }
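(`load_saved_vrf_key` above expects a JSON-serialized `RegisteredKey` on disk; the tests further below exercise it against files written by `save_activated_vrf_key`, which is imported from `nakamoto_node` but not shown in this diff. A hedged sketch of what that save side plausibly does — the struct here is a cut-down, illustrative stand-in, not the crate's type:)

```rust
// Sketch under stated assumptions; not the code from this PR.
use std::fs::File;
use std::io::Write;

use serde::{Deserialize, Serialize};

// Illustrative stand-in for crate::run_loop::RegisteredKey (the real struct
// also carries the VRF public key).
#[derive(Serialize, Deserialize)]
struct RegisteredKeySketch {
    target_block_height: u64,
    block_height: u64,
    op_vtxindex: u32,
    // Hash160 of the miner's public key; load_saved_vrf_key compares this
    // against the node's current mining key before resuming.
    memo: Vec<u8>,
}

fn save_activated_vrf_key_sketch(path: &str, key: &RegisteredKeySketch) {
    let key_json = serde_json::to_vec(key).expect("failed to serialize registered key");
    let mut f = File::create(path).expect("failed to create VRF key file");
    f.write_all(&key_json).expect("failed to write VRF key file");
}
```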
self.globals.counters.bump_blocks_processed(); true } @@ -902,3 +1212,121 @@ impl RelayerThread { continue_running } } + +#[cfg(test)] +pub mod test { + use std::fs::File; + use std::io::Write; + use std::path::Path; + + use stacks::util::hash::Hash160; + use stacks::util::secp256k1::Secp256k1PublicKey; + use stacks::util::vrf::VRFPublicKey; + + use super::RelayerThread; + use crate::nakamoto_node::save_activated_vrf_key; + use crate::run_loop::RegisteredKey; + use crate::Keychain; + + #[test] + fn load_nonexistent_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/does_not_exist.json"; + _ = std::fs::remove_file(&path); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + } + + #[test] + fn load_empty_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/empty.json"; + File::create(&path).expect("Failed to create test file"); + assert!(Path::new(&path).exists()); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn load_bad_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/invalid_saved_key.json"; + let json_content = r#"{ "hello": "world" }"#; + + // Write the JSON content to the file + let mut file = File::create(&path).expect("Failed to create test file"); + file.write_all(json_content.as_bytes()) + .expect("Failed to write to test file"); + assert!(Path::new(&path).exists()); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn save_load_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + let key = RegisteredKey { + target_block_height: 101, + block_height: 102, + op_vtxindex: 1, + vrf_public_key: VRFPublicKey::from_hex( + "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", + ) + .unwrap(), + memo: pubkey_hash.as_ref().to_vec(), + }; + let path = "/tmp/vrf_key.json"; + save_activated_vrf_key(path, &key); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_some()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn invalid_saved_memo() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + let key = RegisteredKey { + target_block_height: 101, + block_height: 102, + op_vtxindex: 1, + vrf_public_key: VRFPublicKey::from_hex( + "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", + ) + .unwrap(), + memo: pubkey_hash.as_ref().to_vec(), + }; + let path = "/tmp/vrf_key.json"; + save_activated_vrf_key(path, &key); + + let keychain = Keychain::default(vec![1u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + 
let pubkey_hash = Hash160::from_node_public_key(&pk); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b1118bebff2..6a5f026a16c 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -13,23 +13,28 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. +use std::collections::BTreeMap; use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::{ - MessageSlotID, SignerEntries, SignerEvent, SignerMessage, SignerSession, StackerDBSession, -}; +use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; +use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; +use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use wsts::common::PolyCommitment; @@ -43,6 +48,7 @@ use wsts::v2::Aggregator; use super::Error as NakamotoNodeError; use crate::event_dispatcher::STACKER_DB_CHANNEL; +use crate::neon::Counters; use crate::Config; /// How long should the coordinator poll on the event receiver before @@ -62,6 +68,9 @@ pub struct SignCoordinator { is_mainnet: bool, miners_session: StackerDBSession, signing_round_timeout: Duration, + signer_entries: HashMap<u32, NakamotoSignerEntry>, + weight_threshold: u32, + pub next_signer_bitvec: BitVec<4000>, } pub struct NakamotoSigningParams { @@ -118,6 +127,7 @@ impl NakamotoSigningParams { } } +#[allow(dead_code)] fn get_signer_commitments( is_mainnet: bool, reward_set: &[NakamotoSignerEntry], @@ -138,10 +148,10 @@ ); continue; }; - let Ok(SignerMessage::DkgResults { + let Ok(SignerMessageV1::DkgResults { aggregate_key, party_polynomials, - }) = SignerMessage::consensus_deserialize(&mut signer_data.as_slice()) + }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) else { warn!( "Failed to parse DKG result, will look for results from other signers."; @@ -190,15 +200,14 @@ impl SignCoordinator { /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - reward_cycle: u64, message_key: Scalar, - aggregate_public_key: Point, - stackerdb_conn: &StackerDBs,
config: &Config, + // v1: bool, ) -> Result<Self, ChainstateError> { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize WSTS coordinator for reward set without signer"); + error!("Could not initialize signing coordinator for reward set without signer"); + debug!("reward set: {:?}", &reward_set); return Err(ChainstateError::NoRegisteredSigners(0)); }; @@ -209,6 +218,15 @@ impl SignCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + let next_signer_bitvec: BitVec<4000> = BitVec::zeros( + reward_set_signers + .clone() + .len() + .try_into() + .expect("FATAL: signer set length greater than u16"), + ) + .expect("FATAL: unable to construct initial bitvec for signer set"); + let NakamotoSigningParams { num_signers, num_keys, @@ -237,19 +255,55 @@ ..Default::default() }; - let mut coordinator: FireCoordinator<Aggregator> = FireCoordinator::new(coord_config); - let party_polynomials = get_signer_commitments( - is_mainnet, - reward_set_signers.as_slice(), - stackerdb_conn, - reward_cycle, - &aggregate_public_key, - )?; - if let Err(e) = coordinator - .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; + + let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let signer_public_keys = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::<Result<HashMap<u32, NakamotoSignerEntry>, ChainstateError>>()?; + + let coordinator: FireCoordinator<Aggregator> = FireCoordinator::new(coord_config); + #[cfg(test)] { - warn!("Failed to set a valid set of party polynomials"; "error" => %e); - }; + // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + use crate::tests::nakamoto_integrations::TEST_SIGNING; + if TEST_SIGNING.lock().unwrap().is_some() { + debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); + let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + if replaced_other { + warn!("Replaced the miner/coordinator receiver of a prior thread.
Prior thread may have crashed."); + } + let sign_coordinator = Self { + coordinator, + message_key, + receiver: Some(receiver), + wsts_public_keys, + is_mainnet, + miners_session, + signing_round_timeout: config.miner.wait_on_signers.clone(), + next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, + }; + return Ok(sign_coordinator); + } + } let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); if replaced_other { @@ -264,6 +318,9 @@ impl SignCoordinator { is_mainnet, miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), + next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, }) } @@ -274,25 +331,54 @@ .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") } - fn send_signers_message( + /// Send a message over the miners contract using a `Scalar` private key + fn send_miners_message_scalar<M: StacksMessageCodec>( message_key: &Scalar, sortdb: &SortitionDB, tip: &BlockSnapshot, stackerdbs: &StackerDBs, - message: SignerMessage, + message: M, + miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, ) -> Result<(), String> { let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); miner_sk.set_compress_public(true); - let miner_pubkey = StacksPublicKey::from_private(&miner_sk); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey) + Self::send_miners_message( + &miner_sk, + sortdb, + tip, + stackerdbs, + message, + miner_slot_id, + is_mainnet, + miners_session, + election_sortition, + ) + } + + /// Send a message over the miners contract using a `StacksPrivateKey` + pub fn send_miners_message<M: StacksMessageCodec>( + miner_sk: &StacksPrivateKey, + sortdb: &SortitionDB, + tip: &BlockSnapshot, + stackerdbs: &StackerDBs, + message: M, + miner_slot_id: MinerSlotID, + is_mainnet: bool, + miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, + ) -> Result<(), String> { + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) + .map_err(|e| format!("Failed to read miner slot information: {e:?}"))?
else { return Err("No slot for miner".into()); }; - let target_slot = 1; - let slot_id = slot_range.start + target_slot; + + let slot_id = slot_range + .start + .saturating_add(miner_slot_id.to_u8().into()); if !slot_range.contains(&slot_id) { return Err("Not enough slots for miner messages".into()); } @@ -322,14 +408,18 @@ impl SignCoordinator { } } - pub fn begin_sign( + #[cfg_attr(test, mutants::skip)] + pub fn begin_sign_v1( &mut self, block: &NakamotoBlock, + burn_block_height: u64, block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, stackerdbs: &StackerDBs, + counters: &Counters, + election_sortiton: &ConsensusHash, ) -> Result { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -339,7 +429,13 @@ impl SignCoordinator { self.coordinator.current_sign_id = sign_id; self.coordinator.current_sign_iter_id = sign_iter_id; - let block_bytes = block.serialize_to_vec(); + let proposal_msg = BlockProposal { + block: block.clone(), + burn_height: burn_block_height, + reward_cycle: reward_cycle_id, + }; + + let block_bytes = proposal_msg.serialize_to_vec(); let nonce_req_msg = self .coordinator .start_signing_round(&block_bytes, false, None) @@ -348,16 +444,31 @@ impl SignCoordinator { "Failed to start signing round in FIRE coordinator: {e:?}" )) })?; - Self::send_signers_message( + Self::send_miners_message_scalar::( &self.message_key, sortdb, burn_tip, &stackerdbs, nonce_req_msg.into(), + MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortiton, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + #[cfg(test)] + { + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(_signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(ThresholdSignature::empty()); + } + } let Some(ref mut receiver) = self.receiver else { return Err(NakamotoNodeError::SigningCoordinatorFailure( @@ -385,6 +496,22 @@ impl SignCoordinator { debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); continue; } + let modified_slots = &event.modified_slots; + + // Update `next_signers_bitvec` with the slots that were modified in the event + modified_slots.iter().for_each(|chunk| { + if let Ok(slot_id) = chunk.slot_id.try_into() { + match &self.next_signer_bitvec.set(slot_id, true) { + Err(e) => { + warn!("Failed to set bitvec for next signer: {e:?}"); + } + _ => (), + }; + } else { + error!("FATAL: slot_id greater than u16, which should never happen."); + } + }); + let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); }) else { @@ -405,10 +532,11 @@ impl SignCoordinator { let packets: Vec<_> = messages .into_iter() .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::Transactions(_) => None, - SignerMessage::Packet(packet) => { + SignerMessageV1::DkgResults { .. 
- | SignerMessage::BlockResponse(_) - | SignerMessage::Transactions(_) => None, - SignerMessage::Packet(packet) => { + SignerMessageV1::DkgResults { .. } + | SignerMessageV1::BlockResponse(_) + | SignerMessageV1::EncryptedSignerState(_) + | SignerMessageV1::Transactions(_) => None, + SignerMessageV1::Packet(packet) => { debug!("Received signers packet: {packet:?}"); if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { warn!("Failed to verify StackerDB packet: {packet:?}"); @@ -454,6 +582,10 @@ "Signature failed to validate over the expected block".into(), )); } else { + info!( + "SignCoordinator: Generated a valid signature for the block"; + "next_signer_bitvec" => self.next_signer_bitvec.binary_str(), + ); return Ok(signature); } } @@ -465,14 +597,18 @@ } } for msg in outbound_msgs { - match Self::send_signers_message( + match Self::send_miners_message_scalar::<SignerMessageV1>( &self.message_key, sortdb, burn_tip, stackerdbs, msg.into(), + // TODO: note, in v1, we'll want to add a new slot, but for now, it just shares + // with the block proposal + MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortition, ) { Ok(()) => { debug!("Miner/Coordinator: sent outbound message."); @@ -490,4 +626,223 @@ "Timed out waiting for group signature".into(), )) } + + /// Start gathering signatures for a Nakamoto block. + /// This function begins by sending a `BlockProposal` message + /// to the signers, and then waits for the signers to respond + /// with their signatures. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] + pub fn begin_sign_v0( + &mut self, + block: &NakamotoBlock, + block_attempt: u64, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + stackerdbs: &StackerDBs, + counters: &Counters, + election_sortition: &ConsensusHash, + ) -> Result<Vec<MessageSignature>, NakamotoNodeError> { + let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); + let sign_iter_id = block_attempt; + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + self.coordinator.current_sign_id = sign_id; + self.coordinator.current_sign_iter_id = sign_iter_id; + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_tip.block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), + ); + Self::send_miners_message_scalar::<SignerMessageV0>( + &self.message_key, + sortdb, + burn_tip, + &stackerdbs, + block_proposal_message, + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + election_sortition, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + #[cfg(test)] + { + info!( + "SignCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer.
+ if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + let Some(ref mut receiver) = self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Failed to obtain the StackerDB event receiver".into(), + )); + }; + + let mut total_weight_signed: u32 = 0; + let mut gathered_signatures = BTreeMap::new(); + + info!("SignCoordinator: beginning to watch for block signatures."; + "threshold" => self.weight_threshold, + ); + + let start_ts = Instant::now(); + while start_ts.elapsed() <= self.signing_round_timeout { + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )) + } + }; + + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + if !is_signer_event { + debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + let modified_slots = &event.modified_slots.clone(); + + let Ok(signer_event) = SignerEvent::<SignerMessageV0>::try_from(event).map_err(|e| { + warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { + debug!("Received signer event for other reward cycle. Ignoring."); + continue; + }; + let slot_ids = modified_slots + .iter() + .map(|chunk| chunk.slot_id) + .collect::<Vec<_>>(); + + debug!("SignCoordinator: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + "threshold" => self.weight_threshold + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + let (response_hash, signature) = match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(( + response_hash, + signature, + ))) => (response_hash, signature), + SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { + debug!("Received rejected block response. Ignoring."); + continue; + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. Ignoring."); + continue; + } + SignerMessageV0::BlockPushed(_) => { + debug!("Received block pushed message. Ignoring."); + continue; + } + SignerMessageV0::MockSignature(_) => { + debug!("Received mock signature message. Ignoring."); + continue; + } + }; + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature for a different block.
Will try to continue."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, + "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; + }; + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + debug!("Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + gathered_signatures.insert(slot_id, signature); + } + + // After gathering all signatures, return them if we've hit the threshold + if total_weight_signed >= self.weight_threshold { + info!("SignCoordinator: Received enough signatures.
Continuing."; + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + return Ok(gathered_signatures.values().cloned().collect()); + } + } + + Err(NakamotoNodeError::SignerSignatureError( + "Timed out waiting for group signature".into(), + )) + } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 3778c8ecc9f..8c3c4ed1799 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -153,7 +153,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; -use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, }; @@ -208,6 +208,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ }; use crate::burnchains::make_bitcoin_indexer; use crate::chain_data::MinerStats; +use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; @@ -298,7 +299,7 @@ pub struct StacksNode { /// True if we're a miner is_miner: bool, /// handle to the p2p thread - pub p2p_thread_handle: JoinHandle<()>, + pub p2p_thread_handle: JoinHandle>, /// handle to the relayer thread pub relayer_thread_handle: JoinHandle<()>, } @@ -401,9 +402,10 @@ struct ParentStacksBlockInfo { coinbase_nonce: u64, } -#[derive(Clone)] +#[derive(Clone, Default)] pub enum LeaderKeyRegistrationState { /// Not started yet + #[default] Inactive, /// Waiting for burnchain confirmation /// `u64` is the target block height in which we intend this key to land @@ -663,7 +665,7 @@ impl MicroblockMinerThread { frequency, last_mined: 0, quantity: 0, - cost_so_far: cost_so_far, + cost_so_far, settings, }) } @@ -727,7 +729,7 @@ impl MicroblockMinerThread { .unwrap_or(0) ); - let burn_height = + let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { error!("Failed to find block snapshot for mined block: {}", e); @@ -736,8 +738,8 @@ impl MicroblockMinerThread { .ok_or_else(|| { error!("Failed to find block snapshot for mined block"); ChainstateError::NoSuchBlockError - })? - .block_height; + })?; + let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { error!("Failed to get AST rules for microblock: {}", e); @@ -753,7 +755,10 @@ impl MicroblockMinerThread { .epoch_id; let mint_result = { - let ic = sortdb.index_conn(); + let ic = sortdb.index_handle_at_block( + &chainstate, + &block_snapshot.get_canonical_stacks_block_id(), + )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( chainstate, &ic, @@ -1110,6 +1115,7 @@ impl BlockMinerThread { let burn_parent_modulus = (current_burn_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let sender = self.keychain.get_burnchain_signer(); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + treatment: vec![], sunset_burn, block_header_hash, burn_fee, @@ -1144,6 +1150,50 @@ impl BlockMinerThread { ret } + /// Is a given Stacks staging block on the canonical burnchain fork? 
+ pub(crate) fn is_on_canonical_burnchain_fork( + candidate: &StagingBlock, + sortdb_tip_handle: &SortitionHandleConn, + ) -> bool { + let candidate_ch = &candidate.consensus_hash; + let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( + sortdb_tip_handle.conn(), + candidate_ch, + ) { + Ok(Some(x)) => x.block_height, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { + Ok(Some(x)) => x, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + &tip_ch == candidate_ch + } + /// Load all candidate tips upon which to build. This is all Stacks blocks whose heights are /// less than or equal to at `at_stacks_height` (or the canonical chain tip height, if not given), /// but greater than or equal to this end height minus `max_depth`. @@ -1173,61 +1223,42 @@ impl BlockMinerThread { let stacks_tips: Vec<_> = stacks_tips .into_iter() - .filter(|candidate| { - let candidate_ch = &candidate.consensus_hash; - let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( - sortdb_tip_handle.conn(), - candidate_ch - ) { - Ok(Some(x)) => x.block_height, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { - Ok(Some(x)) => x, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - if &tip_ch != candidate_ch { - false - } else { - true - } - }) + .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); + if stacks_tips.len() == 0 { + return vec![]; + } + let mut considered = HashSet::new(); let mut candidates = vec![]; let end_height = stacks_tips[0].height; - for cur_height in end_height.saturating_sub(max_depth)..=end_height { - let stacks_tips = chain_state + // process these tips + for tip in stacks_tips.into_iter() { + let index_block_hash = + 
StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); + let burn_height = burn_db + .get_consensus_hash_height(&tip.consensus_hash) + .expect("FATAL: could not query burnchain block height") + .expect("FATAL: no burnchain block height for Stacks tip"); + let candidate = TipCandidate::new(tip, burn_height); + candidates.push(candidate); + considered.insert(index_block_hash); + } + + // process earlier tips, back to max_depth + for cur_height in end_height.saturating_sub(max_depth)..end_height { + let stacks_tips: Vec<_> = chain_state .get_stacks_chain_tips_at_height(cur_height) - .expect("FATAL: could not query chain tips at height"); + .expect("FATAL: could not query chain tips at height") + .into_iter() + .filter(|candidate| { + Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle) + }) + .collect(); - for tip in stacks_tips { + for tip in stacks_tips.into_iter() { let index_block_hash = StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); @@ -1507,6 +1538,8 @@ impl BlockMinerThread { Some((*best_tip).clone()) } + // TODO: add tests from mutation testing results #4870 + #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. /// If we can't find the parent in the DB but we expect one, return None. @@ -1712,7 +1745,7 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key - let vrf_proof = if self.config.node.mock_mining { + let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, self.burn_block.sortition_hash.as_bytes(), @@ -2222,6 +2255,8 @@ impl BlockMinerThread { return false; } + // TODO: add tests from mutation testing results #4871 + #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block data as /// well as the microblock private key to use to produce microblocks. @@ -2352,7 +2387,7 @@ impl BlockMinerThread { } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2382,7 +2417,7 @@ impl BlockMinerThread { // try again match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2535,7 +2570,7 @@ impl BlockMinerThread { let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); if res.is_none() { self.failed_to_submit_last_attempt = true; - if !self.config.node.mock_mining { + if !self.config.get_node_config(false).mock_mining { warn!("Relayer: Failed to submit Bitcoin transaction"); return None; } @@ -2727,6 +2762,7 @@ impl RelayerThread { .process_network_result( &relayer_thread.local_peer, &mut net_result, + &relayer_thread.burnchain, sortdb, chainstate, mempool, @@ -3096,6 +3132,8 @@ impl RelayerThread { (true, miner_tip) } + // TODO: add tests from mutation testing results #4872 + #[cfg_attr(test, mutants::skip)] /// Process all new tenures that we're aware of. 
/// Clear out stale tenure artifacts as well. /// Update the miner tip if we won the highest tenure (or clear it if we didn't). @@ -3317,10 +3355,16 @@ impl RelayerThread { fn inner_generate_leader_key_register_op( vrf_public_key: VRFPublicKey, consensus_hash: &ConsensusHash, + miner_pk: Option<&StacksPublicKey>, ) -> BlockstackOperationType { + let memo = if let Some(pk) = miner_pk { + Hash160::from_node_public_key(pk).as_bytes().to_vec() + } else { + vec![] + }; BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, - memo: vec![], + memo, consensus_hash: consensus_hash.clone(), vtxindex: 0, txid: Txid([0u8; 32]), @@ -3350,7 +3394,20 @@ impl RelayerThread { ); let burnchain_tip_consensus_hash = &burn_block.consensus_hash; - let op = Self::inner_generate_leader_key_register_op(vrf_pk, burnchain_tip_consensus_hash); + // if the miner has set a mining key in preparation for epoch-3.0, register it as part of their VRF key registration + // once implemented in the nakamoto_node, this will allow miners to transition from 2.5 to 3.0 without submitting a new + // VRF key registration. + let miner_pk = self + .config + .miner + .mining_key + .as_ref() + .map(StacksPublicKey::from_private); + let op = Self::inner_generate_leader_key_register_op( + vrf_pk, + burnchain_tip_consensus_hash, + miner_pk.as_ref(), + ); let mut one_off_signer = self.keychain.generate_op_signer(); if let Some(txid) = @@ -3518,7 +3575,7 @@ impl RelayerThread { return false; } - if !self.config.node.mock_mining { + if !self.config.get_node_config(false).mock_mining { // mock miner can't mine microblocks yet, so don't stop it from trying multiple // anchored blocks if self.mined_stacks_block && self.config.node.mine_microblocks { @@ -3550,6 +3607,8 @@ impl RelayerThread { true } + // TODO: add tests from mutation testing results #4872 + #[cfg_attr(test, mutants::skip)] /// See if we should run a microblock tenure now. 
/// Return true if so; false if not fn can_run_microblock_tenure(&mut self) -> bool { @@ -3912,7 +3971,6 @@ impl RelayerThread { if let Some(saved_key) = saved_key_opt { self.globals.resume_leader_key(saved_key); } else { - debug!("Relayer: directive Register VRF key"); self.rotate_vrf_and_register(&last_burn_block); debug!("Relayer: directive Registered VRF key"); } @@ -3927,6 +3985,17 @@ impl RelayerThread { } RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { debug!("Relayer: directive Run tenure"); + let Ok(Some(next_block_epoch)) = SortitionDB::get_stacks_epoch( + self.sortdb_ref().conn(), + last_burn_block.block_height.saturating_add(1), + ) else { + warn!("Failed to load Stacks Epoch for next burn block, skipping RunTenure directive"); + return true; + }; + if next_block_epoch.epoch_id.uses_nakamoto_blocks() { + info!("Next burn block is in Nakamoto epoch, skipping RunTenure directive for 2.x node"); + return true; + } self.block_miner_thread_try_start( registered_key, last_burn_block, @@ -4047,7 +4116,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db.index_handle(&burn_chain_tip.sortition_id), &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), |conn| StacksChainState::get_account(conn, &principal), ) @@ -4171,7 +4240,7 @@ impl PeerThread { net.bind(&p2p_sock, &rpc_sock) .expect("BUG: PeerNetwork could not bind or is already bound"); - let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + let poll_timeout = config.get_poll_time(); PeerThread { config, @@ -4591,7 +4660,12 @@ impl StacksNode { stackerdb_configs.insert(contract.clone(), StackerDBConfig::noop()); } let stackerdb_configs = stackerdbs - .create_or_reconfigure_stackerdbs(&mut chainstate, &sortdb, stackerdb_configs) + .create_or_reconfigure_stackerdbs( + &mut chainstate, + &sortdb, + stackerdb_configs, + config.connection_options.num_neighbors, + ) .unwrap(); let stackerdb_contract_ids: Vec<QualifiedContractIdentifier> = @@ -4655,7 +4729,10 @@ impl StacksNode { /// Main loop of the p2p thread. /// Runs in a separate thread. /// Continuously receives, until told otherwise. - pub fn p2p_main(mut p2p_thread: PeerThread, event_dispatcher: EventDispatcher) { + pub fn p2p_main( + mut p2p_thread: PeerThread, + event_dispatcher: EventDispatcher, + ) -> Option<PeerNetwork> { let should_keep_running = p2p_thread.globals.should_keep_running.clone(); let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); @@ -4718,6 +4795,7 @@ impl StacksNode { thread::sleep(Duration::from_secs(5)); } info!("P2P thread exit!"); + p2p_thread.net } /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. @@ -4776,8 +4854,12 @@ impl StacksNode { let local_peer = p2p_net.local_peer.clone(); + let NodeConfig { + mock_mining, miner, .. + } = config.get_node_config(false); + // setup initial key registration - let leader_key_registration_state = if config.node.mock_mining { + let leader_key_registration_state = if mock_mining { // mock mining, pretend to have a registered key let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); LeaderKeyRegistrationState::Active(RegisteredKey { @@ -4785,8 +4867,13 @@ impl StacksNode { block_height: 1, op_vtxindex: 1, vrf_public_key, + memo: vec![], }) } else { + // Warn the user that they need to set up a miner key + if miner && config.miner.mining_key.is_none() { + warn!("`[miner.mining_key]` not set in config file.
This will be required to mine in Epoch 3.0!") + } LeaderKeyRegistrationState::Inactive }; globals.set_initial_leader_key_registration_state(leader_key_registration_state); @@ -4814,7 +4901,7 @@ impl StacksNode { )) .spawn(move || { debug!("p2p thread ID is {:?}", thread::current().id()); - Self::p2p_main(p2p_thread, p2p_event_dispatcher); + Self::p2p_main(p2p_thread, p2p_event_dispatcher) }) .expect("FATAL: failed to start p2p thread"); @@ -5017,8 +5104,8 @@ impl StacksNode { } /// Join all inner threads - pub fn join(self) { + pub fn join(self) -> Option<PeerNetwork> { self.relayer_thread_handle.join().unwrap(); - self.p2p_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap() } } diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 77117a6822b..39095a51d5d 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -3,7 +3,6 @@ use std::net::SocketAddr; use std::thread::JoinHandle; use std::{env, thread, time}; -use clarity::vm::database::BurnStateDB; use rand::RngCore; use stacks::burnchains::bitcoin::BitcoinNetworkType; use stacks::burnchains::db::BurnchainDB; @@ -581,6 +580,7 @@ impl Node { block_height: op.block_height as u64, op_vtxindex: op.vtxindex as u32, target_block_height: (op.block_height as u64) - 1, + memo: op.memo.clone(), }); } } @@ -890,10 +890,10 @@ impl Node { let mut cost_estimator = self.config.make_cost_estimator(); let mut fee_estimator = self.config.make_fee_estimator(); - let stacks_epoch = db - .index_conn() - .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = + SortitionDB::get_stacks_epoch_by_epoch_id(db.conn(), &processed_block.evaluated_epoch) + .expect("FATAL: could not query sortition DB for epochs") + .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { estimator.notify_block( &processed_block.tx_receipts, @@ -905,7 +905,7 @@ impl Node { if let Some(estimator) = fee_estimator.as_mut() { if let Err(e) = estimator.notify_block(&processed_block, &stacks_epoch.block_limit) { warn!("FeeEstimator failed to process block receipt"; - "stacks_block" => %processed_block.header.anchored_header.block_hash(), + "stacks_block_hash" => %processed_block.header.anchored_header.block_hash(), "stacks_height" => %processed_block.header.stacks_block_height, "error" => %e); } @@ -1036,6 +1036,7 @@ impl Node { let txid = Txid(txid_bytes); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash, burn_fee, diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index dec1ca757f5..b78d857d59b 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -23,13 +23,42 @@ use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::core::StacksEpochExtension; +use stacks::net::p2p::PeerNetwork; use stacks_common::types::{StacksEpoch, StacksEpochId}; +use crate::globals::NeonGlobals; use crate::neon::Counters; +use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::nakamoto::RunLoop as NakaRunLoop; use crate::run_loop::neon::RunLoop as NeonRunLoop; use crate::Config; +/// Data which should persist through transition from Neon => Nakamoto run loop +#[derive(Default)] +pub struct Neon2NakaData { + pub
leader_key_registration_state: LeaderKeyRegistrationState, + pub peer_network: Option<PeerNetwork>, +} + +impl Neon2NakaData { + /// Take needed values from `NeonGlobals` and optionally `PeerNetwork`, consuming them + pub fn new(globals: NeonGlobals, peer_network: Option<PeerNetwork>) -> Self { + let key_state = globals + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + + Self { + leader_key_registration_state: (*key_state).clone(), + peer_network, + } + } +} + /// This runloop handles booting to Nakamoto: /// During epochs [1.0, 2.5], it runs a neon run_loop. /// Once epoch 3.0 is reached, it stops the neon run_loop @@ -108,9 +137,13 @@ impl BootRunLoop { let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { panic!("FATAL: unexpectedly invoked start_from_naka when active loop wasn't nakamoto"); }; - naka_loop.start(burnchain_opt, mine_start) + naka_loop.start(burnchain_opt, mine_start, None) } + // configuring mutants::skip -- this function is covered through integration tests (this function + // is pretty definitionally an integration, so that's unavoidable), and the integration tests + // do not get counted in mutants coverage. + #[cfg_attr(test, mutants::skip)] fn start_from_neon(&mut self, burnchain_opt: Option<Burnchain>, mine_start: u64) { let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); @@ -120,7 +153,7 @@ let boot_thread = Self::spawn_stopper(&self.config, neon_loop) .expect("FATAL: failed to spawn epoch-2/3-boot thread"); - neon_loop.start(burnchain_opt.clone(), mine_start); + let data_to_naka = neon_loop.start(burnchain_opt.clone(), mine_start); let monitoring_thread = neon_loop.take_monitoring_thread(); // did we exit because of the epoch-3.0 transition, or some other reason? 
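The `Neon2NakaData` handoff defined above is what lets the nakamoto node inherit a registered VRF key and an already-bound p2p socket instead of starting from scratch. A sketch of how the boot loop is meant to drive it end to end, assuming the `start` signatures introduced in this patch (the wrapper function is illustrative):

use stacks::burnchains::Burnchain;

fn boot_through_epoch_3(
    neon_loop: &mut NeonRunLoop,
    naka_loop: &mut NakaRunLoop,
    burnchain_opt: Option<Burnchain>,
    mine_start: u64,
) {
    // On a clean shutdown at the epoch-3.0 boundary, the neon loop hands
    // back its leader key state and p2p stack as Option<Neon2NakaData>...
    let data_to_naka = neon_loop.start(burnchain_opt.clone(), mine_start);
    // ...which the nakamoto loop consumes on startup.
    naka_loop.start(burnchain_opt, mine_start, data_to_naka);
}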
@@ -131,7 +164,12 @@ impl BootRunLoop { info!("Shutting down epoch-2/3 transition thread"); return; } - info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + + info!( + "Reached Epoch-3.0 boundary, starting nakamoto node"; + "with_neon_data" => data_to_naka.is_some(), + "with_p2p_stack" => data_to_naka.as_ref().map(|x| x.peer_network.is_some()).unwrap_or(false) + ); termination_switch.store(true, Ordering::SeqCst); let naka = NakaRunLoop::new( self.config.clone(), @@ -150,7 +188,7 @@ impl BootRunLoop { let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); }; - naka_loop.start(burnchain_opt, mine_start) + naka_loop.start(burnchain_opt, mine_start, data_to_naka) } fn spawn_stopper( diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index c7212d4132b..2922ce584ac 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -89,8 +89,11 @@ impl RunLoop { let _ = burnchain.sortdb_mut(); // Run the tenure, keep the artifacts - let artifacts_from_1st_tenure = match first_tenure.run(&burnchain.sortdb_ref().index_conn()) - { + let artifacts_from_1st_tenure = match first_tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) { Some(res) => res, None => panic!("Error while running 1st tenure"), }; @@ -136,7 +139,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), ); // If the node we're looping on won the sortition, initialize and configure the next tenure @@ -160,7 +165,11 @@ impl RunLoop { &chain_tip, &mut tenure, ); - tenure.run(&burnchain.sortdb_ref().index_conn()) + tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) } None => None, }; @@ -214,7 +223,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), ); } }; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 01f848c2e6f..b824793e172 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -158,6 +158,9 @@ pub struct RegisteredKey { pub op_vtxindex: u32, /// the public key itself pub vrf_public_key: VRFPublicKey, + /// `memo` field that was used to register the key + /// Could be `Hash160(miner_pubkey)`, or empty + pub memo: Vec<u8>, } pub fn announce_boot_receipts( diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index dd13b2d32c1..511b6c84b2d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -39,10 +39,12 @@ use crate::burnchains::make_bitcoin_indexer; use crate::globals::Globals as GenericGlobals; use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError}; use crate::nakamoto_node::{self, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::LeaderKeyRegistrationState; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, }; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::neon; use 
crate::run_loop::neon::Counters; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; @@ -195,7 +197,7 @@ impl RunLoop { return true; } } - if self.config.node.mock_mining { + if self.config.get_node_config(false).mock_mining { info!("No UTXOs found, but configured to mock mine"); return true; } else { @@ -392,7 +394,12 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. - pub fn start(&mut self, burnchain_opt: Option<Burnchain>, mut mine_start: u64) { + pub fn start( + &mut self, + burnchain_opt: Option<Burnchain>, + mut mine_start: u64, + data_from_neon: Option<Neon2NakaData>, + ) { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() @@ -440,6 +447,7 @@ impl RunLoop { self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), mine_start, + LeaderKeyRegistrationState::default(), ); self.set_globals(globals.clone()); @@ -475,7 +483,7 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) - let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv, data_from_neon); // Wait for all pending sortitions to process let burnchain_db = burnchain_config @@ -627,9 +635,12 @@ impl RunLoop { let sortition_id = &block.sortition_id; // Have the node process the new block, that can include, or not, a sortition. - if let Err(e) = - node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd) - { + if let Err(e) = node.process_burnchain_state( + self.config(), + burnchain.sortdb_mut(), + sortition_id, + ibd, + ) { // relayer errored, exit. 
error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); return; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 6f2f643d30b..663c14e27ba 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -33,11 +33,14 @@ use super::RunLoopCallbacks; use crate::burnchains::{make_bitcoin_indexer, Error}; use crate::globals::NeonGlobals as Globals; use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError}; -use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::{ + LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER, +}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, }; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, @@ -79,6 +82,17 @@ impl std::ops::Deref for RunLoopCounter { } } +#[cfg(test)] +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +#[cfg(test)] +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(std::sync::Mutex::new(None))) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -92,6 +106,9 @@ pub struct Counters { pub naka_mined_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, + + #[cfg(test)] + pub naka_skip_commit_op: TestFlag, } impl Counters { @@ -373,7 +390,7 @@ impl RunLoop { return true; } } - if self.config.node.mock_mining { + if self.config.get_node_config(false).mock_mining { info!("No UTXOs found, but configured to mock mine"); return true; } else { @@ -999,7 +1016,13 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. - pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + /// + /// Returns `Option` so that data can be passed to `NakamotoNode` + pub fn start( + &mut self, + burnchain_opt: Option, + mut mine_start: u64, + ) -> Option { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() @@ -1018,12 +1041,12 @@ impl RunLoop { Ok(burnchain_controller) => burnchain_controller, Err(burnchain_error::ShutdownInitiated) => { info!("Exiting stacks-node"); - return; + return None; } Err(e) => { error!("Error initializing burnchain: {}", e); info!("Exiting stacks-node"); - return; + return None; } }; @@ -1046,6 +1069,7 @@ impl RunLoop { self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), mine_start, + LeaderKeyRegistrationState::default(), ); self.set_globals(globals.clone()); @@ -1142,11 +1166,15 @@ impl RunLoop { globals.coord().stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); - node.join(); + let peer_network = node.join(); liveness_thread.join().unwrap(); + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + info!("Exiting stacks-node"); - break; + break Some(data_to_naka); } let remote_chain_height = burnchain.get_headers_height() - 1; @@ -1269,7 +1297,7 @@ impl RunLoop { if !node.relayer_sortition_notify() { // relayer hung up, exit. 
error!("Runloop: Block relayer and miner hung up, exiting."); - return; + return None; } } @@ -1343,7 +1371,7 @@ impl RunLoop { if !node.relayer_issue_tenure(ibd) { // relayer hung up, exit. error!("Runloop: Block relayer and miner hung up, exiting."); - break; + break None; } } } diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index fd7683f569e..5dd67cddabb 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use stacks::burnchains::PoxConstants; #[cfg(test)] use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::db::sortdb::SortitionDBConn; +use stacks::chainstate::burn::db::sortdb::SortitionHandleConn; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::BlockBuilderSettings; use stacks::chainstate::stacks::{ @@ -72,7 +72,7 @@ impl<'a> Tenure { } } - pub fn run(&mut self, burn_dbconn: &SortitionDBConn) -> Option { + pub fn run(&mut self, burn_dbconn: &SortitionHandleConn) -> Option { info!("Node starting new tenure with VRF {:?}", self.vrf_seed); let duration_left: u128 = self.config.burnchain.commit_anchor_block_within as u128; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 5f8b1aabd39..3fbfa519862 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -21,6 +21,14 @@ pub enum BitcoinCoreError { SpawnFailed(String), } +impl std::fmt::Display for BitcoinCoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + } + } +} + type BitcoinResult = Result; pub struct BitcoinCoreController { diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0b363081e0d..68f37b4fb8e 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -602,6 +602,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = (tip_info.burn_block_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + treatment: vec![], sunset_burn, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: rest_commit, diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 14db80f0b10..66964679304 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1928,6 +1928,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = ((tip_info.burn_block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: burn_fee_cap, @@ -5143,7 +5144,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -5423,7 +5424,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = 
sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 4e387d6304f..289d09be642 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -400,7 +400,7 @@ fn disable_pox() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1069,7 +1069,7 @@ fn pox_2_unlock_all() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 470eda96724..0452be84766 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -268,7 +268,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_1 - 2, and causes the the + // this mines bitcoin block epoch_2_1 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_1 - 1, so these are the last transactions processed pre-2.1. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -339,7 +339,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_2 - 2, and causes the the + // this mines bitcoin block epoch_2_2 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_2 - 1, so these are the last transactions processed pre-2.2. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -424,7 +424,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_3 - 2, and causes the the + // this mines bitcoin block epoch_2_3 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_3 - 1, so these are the last transactions processed pre-2.3. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 2cc9868dc65..3fc3b3d5905 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -28,7 +28,6 @@ use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; use stacks::core; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; -use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; use stacks_common::types::Address; @@ -37,6 +36,7 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, @@ -493,7 +493,7 @@ fn fix_to_pox_contract() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1213,7 +1213,7 @@ fn verify_auto_unlock_behavior() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 35dca5b5370..694d27ca155 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -22,6 +22,7 @@ use stacks::chainstate::stacks::{ TransactionContractCall, TransactionPayload, }; use stacks::clarity_vm::clarity::ClarityConnection; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -31,7 +32,6 @@ use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::getaccount::AccountEntryResponse; use stacks::net::api::getcontractsrc::ContractSrcResponse; use stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 8c906cd43e3..6221c6cf11f 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -13,13 +13,13 @@ use stacks::chainstate::stacks::{ TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionSpendingCondition, TransactionVersion, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks::net::Error as NetError; use stacks_common::address::AddressHashMode; -use 
stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 5a237e6e20c..a7892b9a2db 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -21,7 +21,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; -use clarity::vm::{ClarityName, ContractName, Value}; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; use rand::RngCore; use stacks::chainstate::burn::ConsensusHash; @@ -223,35 +223,49 @@ pub fn serialize_sign_tx_anchor_mode_version( buf } -pub fn make_contract_publish( +pub fn make_contract_publish_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, contract_name: &str, contract_content: &str, + version: Option<ClarityVersion>, ) -> Vec<u8> { let name = ContractName::from(contract_name); let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - let payload = TransactionSmartContract { name, code_body }; + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee) } -pub fn make_contract_publish_microblock_only( +pub fn make_contract_publish( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_versioned(sender, nonce, tx_fee, contract_name, contract_content, None) +} + +pub fn make_contract_publish_microblock_only_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, contract_name: &str, contract_content: &str, + version: Option<ClarityVersion>, ) -> Vec<u8> { let name = ContractName::from(contract_name); let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - let payload = TransactionSmartContract { name, code_body }; + let payload = + TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), + payload, sender, nonce, tx_fee, @@ -259,6 +273,23 @@ pub fn make_contract_publish_microblock_only( ) } +pub fn make_contract_publish_microblock_only( + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + contract_name: &str, + contract_content: &str, +) -> Vec<u8> { + make_contract_publish_microblock_only_versioned( + sender, + nonce, + tx_fee, + contract_name, + contract_content, + None, + ) +} + pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", @@ -293,6 +324,41 @@ pub fn new_test_conf() -> Config { conf } +/// Randomly change the config's network ports to new ports. 
+pub fn set_random_binds(config: &mut Config) { + let prior_rpc_port: u16 = config + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = config + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { + break (rpc_port, p2p_port); + } + }; + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); + config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); + config.node.data_url = format!("http://{}:{}", localhost, rpc_port); + config.node.p2p_address = format!("{}:{}", localhost, p2p_port); +} + pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -567,7 +633,7 @@ fn should_succeed_mining_valid_txs() { }, 3 => { // On round 3, publish a "set:foo=bar" transaction - // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" + // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index aa545514f05..f3cf76af04b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -13,7 +13,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; @@ -24,9 +25,12 @@ use std::{env, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::{ClarityName, ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; -use libsigner::{SignerSession, StackerDBSession}; +use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v1::messages::SignerMessage as SignerMessageV1; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -34,16 +38,26 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, PreStxOp, StackStxOp, VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; use stacks::chainstate::nakamoto::test_signers::TestSigners; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; +use stacks::chainstate::stacks::miner::{ + BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, +}; +use stacks::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, + TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, + TransactionVersion, MAX_BLOCK_LEN, +}; +use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -52,6 +66,7 @@ use stacks::core::{ }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; +use stacks::net::api::get_tenures_fork_info::TenureForkingInfo; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, @@ -67,26 +82,35 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + TrieHash, }; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use 
stacks_common::util::sleep_ms; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; +use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; +use stacks_signer::signerdb::{BlockInfo, ExtraBlockInfo, SignerDb}; +use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - get_account, get_chain_info_result, get_pox_info, next_block_and_wait, - run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, + get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, + wait_for_runloop, +}; +use crate::tests::{ + get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, + to_addr, }; -use crate::tests::{get_chain_info, make_stacks_transfer, to_addr}; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; -static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; +pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -159,8 +183,10 @@ pub static TEST_SIGNING: Mutex<Option<TestSigningChannel>> = Mutex::new(None); pub struct TestSigningChannel { - pub recv: Option<Receiver<ThresholdSignature>>, - pub send: Sender<ThresholdSignature>, + // pub recv: Option<Receiver<ThresholdSignature>>, + pub recv: Option<Receiver<Vec<MessageSignature>>>, + // pub send: Sender<ThresholdSignature>, + pub send: Sender<Vec<MessageSignature>>, } impl TestSigningChannel { @@ -169,14 +195,16 @@ impl TestSigningChannel { /// Returns None if the singleton isn't instantiated and the miner should coordinate /// a real signer set signature. /// Panics if the blind-signer times out. - pub fn get_signature() -> Option<ThresholdSignature> { + /// + /// TODO: update to use signatures vec + pub fn get_signature() -> Option<Vec<MessageSignature>> { let mut signer = TEST_SIGNING.lock().unwrap(); let Some(sign_channels) = signer.as_mut() else { return None; }; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. - let signature = recv.recv_timeout(Duration::from_secs(30)).unwrap(); + let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); let overwritten = TEST_SIGNING .lock() .unwrap() .as_mut() .unwrap() .recv .replace(recv); assert!(overwritten.is_none()); - Some(signature) + Some(signatures) } /// Setup the TestSigningChannel as a singleton using TEST_SIGNING, /// returning an owned Sender to the channel. - pub fn instantiate() -> Sender<ThresholdSignature> { + pub fn instantiate() -> Sender<Vec<MessageSignature>> { let (send, recv) = channel(); let existed = TEST_SIGNING.lock().unwrap().replace(Self { recv: Some(recv), @@ -265,75 +293,245 @@ pub fn blind_signer( signers: &TestSigners, proposals_count: RunLoopCounter, ) -> JoinHandle<()> { + blind_signer_multinode(signers, &[conf], vec![proposals_count]) +}
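The channel singleton above is how these tests hand externally-produced signatures to the miner thread. A hypothetical round trip through it, assuming only the types introduced in this patch:

use stacks_common::util::secp256k1::MessageSignature;

/// Sketch: install the test channel, push a signature bundle in as the
/// blind-signer thread would, then take it back out as the miner does.
fn test_signing_round_trip(sigs: Vec<MessageSignature>) -> Option<Vec<MessageSignature>> {
    let send = TestSigningChannel::instantiate();
    send.send(sigs).unwrap();
    TestSigningChannel::get_signature()
}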
 + +/// Spawn a blind signing thread listening to potentially multiple stacks nodes. +/// `signer` is the private key of the individual signer who broadcasts the response to the StackerDB. +/// The thread will check each node's proposal counter in order to wake up, but will only read from the first +/// node's StackerDB (it will read all of the StackerDBs to provide logging information, though). +pub fn blind_signer_multinode( + signers: &TestSigners, + configs: &[&Config], + proposals_count: Vec<RunLoopCounter>, +) -> JoinHandle<()> { + assert_eq!( + configs.len(), + proposals_count.len(), + "Expect the same number of node configs as proposals counters" + ); let sender = TestSigningChannel::instantiate(); let mut signed_blocks = HashSet::new(); - let conf = conf.clone(); + let configs: Vec<_> = configs.iter().map(|x| Clone::clone(*x)).collect(); let signers = signers.clone(); - let mut last_count = proposals_count.load(Ordering::SeqCst); - thread::spawn(move || loop { - thread::sleep(Duration::from_millis(100)); - let cur_count = proposals_count.load(Ordering::SeqCst); - if cur_count <= last_count { - continue; - } - last_count = cur_count; - match read_and_sign_block_proposal(&conf, &signers, &signed_blocks, &sender) { - Ok(signed_block) => { - if signed_blocks.contains(&signed_block) { - continue; - } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); - signed_blocks.insert(signed_block); + let mut last_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + thread::Builder::new() + .name("blind-signer".into()) + .spawn(move || loop { + thread::sleep(Duration::from_millis(100)); + let cur_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + if cur_count + .iter() + .zip(last_count.iter()) + .all(|(cur_count, last_count)| cur_count <= last_count) + { + continue; } - Err(e) => { - warn!("Error reading and signing block proposal: {e}"); + thread::sleep(Duration::from_secs(2)); + info!("Checking for a block proposal to sign..."); + last_count = cur_count; + let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, will sleep and try again"; "signer_sig_hash" => signed_block.to_hex()); + thread::sleep(Duration::from_secs(5)); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, ignoring"; "signer_sig_hash" => signed_block.to_hex()); + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } + }; + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } } + }) + .unwrap() +} + +pub fn get_latest_block_proposal( + conf: &Config, + sortdb: &SortitionDB, +) -> Result<(NakamotoBlock, StacksPublicKey), String> { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let (stackerdb_conf, miner_info) = + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map_err(|e| e.to_string())?; + let miner_ranges = stackerdb_conf.signer_ranges(); + let latest_miner = usize::from(miner_info.get_latest_winner_index()); + let miner_contract_id = boot_code_id(MINERS_NAME, false); + let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, 
miner_contract_id); + + let mut proposed_blocks: Vec<_> = stackerdb_conf + .signers + .iter() + .enumerate() + .zip(miner_ranges) + .filter_map(|((miner_ix, (miner_addr, _)), miner_slot_id)| { + let proposed_block = { + let message: SignerMessageV0 = + miners_stackerdb.get_latest(miner_slot_id.start).ok()??; + let SignerMessageV0::BlockProposal(block_proposal) = message else { + panic!("Expected a signer message block proposal. Got {message:?}"); + }; + block_proposal.block + }; + Some((proposed_block, miner_addr, miner_ix == latest_miner)) + }) + .collect(); + + proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { + if block_a.header.chain_length > block_b.header.chain_length { + return std::cmp::Ordering::Greater; + } else if block_a.header.chain_length < block_b.header.chain_length { + return std::cmp::Ordering::Less; } - // the heights are tied, tie break with the latest miner + if *is_latest_a { + return std::cmp::Ordering::Greater; + } + if *is_latest_b { + return std::cmp::Ordering::Less; + } + return std::cmp::Ordering::Equal; + }); + + for (b, _, is_latest) in proposed_blocks.iter() { + info!("Consider block"; "signer_sighash" => %b.header.signer_signature_hash(), "is_latest_sortition" => is_latest, "chain_height" => b.header.chain_length); + } + + let (proposed_block, miner_addr, _) = proposed_blocks.pop().unwrap(); + + let pubkey = StacksPublicKey::recover_to_pubkey( + proposed_block.header.miner_signature_hash().as_bytes(), + &proposed_block.header.miner_signature, + ) + .map_err(|e| e.to_string())?; + let miner_signed_addr = StacksAddress::p2pkh(false, &pubkey); + if miner_signed_addr.bytes != miner_addr.bytes { + return Err(format!( + "Invalid miner signature on proposal. Found {}, expected {}", + miner_signed_addr.bytes, miner_addr.bytes + )); + } + + Ok((proposed_block, pubkey)) +} + +#[allow(dead_code)] +fn get_block_proposal_msg_v1( + miners_stackerdb: &mut StackerDBSession, + slot_id: u32, +) -> NakamotoBlock { + let message: SignerMessageV1 = miners_stackerdb + .get_latest(slot_id) + .expect("Failed to get latest chunk from the miner slot ID") + .expect("No chunk found"); + let SignerMessageV1::Packet(packet) = message else { + panic!("Expected a signer message packet. Got {message:?}"); + }; + let Message::NonceRequest(nonce_request) = packet.msg else { + panic!("Expected a nonce request. Got {:?}", packet.msg); + }; + let block_proposal = + BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) + .expect("Failed to deserialize block proposal"); + block_proposal.block } pub fn read_and_sign_block_proposal( - conf: &Config, + configs: &[&Config], signers: &TestSigners, signed_blocks: &HashSet<Sha512Trunc256Sum>, - channel: &Sender<ThresholdSignature>, + channel: &Sender<Vec<MessageSignature>>, ) -> Result<Sha512Trunc256Sum, String> { + let conf = configs.first().unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); - let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .map_err(|_| "Unable to get miner slot")?
- .ok_or("No miner slot exists")?; - let reward_cycle = burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - let mut proposed_block: NakamotoBlock = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - miners_stackerdb - .get_latest(miner_slot_id.start) - .map_err(|_| "Failed to get latest chunk from the miner slot ID")? - .ok_or("No chunk found")? - }; + + let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; + let other_views_result: Result<Vec<_>, _> = configs + .get(1..) + .unwrap() + .iter() + .map(|other_conf| { + get_latest_block_proposal(other_conf, &sortdb).map(|proposal| { + ( + proposal.0.header.signer_signature_hash(), + proposal.0.header.chain_length, + ) + }) + }) + .collect(); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); + let other_views = other_views_result?; + if !other_views.is_empty() { + info!( + "Fetched block proposals"; + "primary_latest_signer_sighash" => %signer_sig_hash, + "primary_latest_block_height" => proposed_block.header.chain_length, + "other_views" => ?other_views, + ); + } if signed_blocks.contains(&signer_sig_hash) { // already signed off on this block, don't sign again. return Ok(signer_sig_hash); } + let reward_set = load_nakamoto_reward_set( + burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .unwrap(), + &tip.sortition_id, + &burnchain, + &mut chainstate, + &proposed_block.header.parent_block_id, + &sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to query reward set") + .expect("No reward set calculated") + .0 + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); + info!( "Fetched proposed block from .miners StackerDB"; "proposed_block_hash" => &proposed_block_hash, "signer_sig_hash" => &signer_sig_hash.to_hex(), ); - signers - .clone() - .sign_nakamoto_block(&mut proposed_block, reward_cycle); + signers.sign_block_with_reward_set(&mut proposed_block, &reward_set); channel .send(proposed_block.header.signer_signature) @@ -427,6 +625,21 @@ where Ok(()) } +pub fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String> +where + F: FnMut() -> Result<bool, String>, +{ + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for check to process"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +}
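The `wait_for` helper above generalizes the hand-rolled polling loops used throughout these tests. A hypothetical usage, polling a shared counter (the wrapper function and threshold are illustrative):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

/// Sketch: block until at least `min` block-commits have been submitted,
/// or give up with an Err after 30 seconds.
fn wait_for_commits(commits: &Arc<AtomicU64>, min: u64) -> Result<(), String> {
    wait_for(30, || Ok(commits.load(Ordering::SeqCst) >= min))
}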
 /// Mine a bitcoin block, and wait until: /// (1) a new block has been processed by the coordinator pub fn next_block_and_process_new_stacks_block( @@ -460,54 +673,95 @@ pub fn next_block_and_mine_commit( coord_channels: &Arc<Mutex<CoordinatorChannels>>, commits_submitted: &Arc<AtomicU64>, ) -> Result<(), String> { - let commits_submitted = commits_submitted.clone(); - let blocks_processed_before = coord_channels .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - let commits_before = commits_submitted.load(Ordering::SeqCst); - let mut block_processed_time: Option<Instant> = None; - let mut commit_sent_time: Option<Instant> = None; + next_block_and_wait_for_commits( + btc_controller, + timeout_secs, + &[coord_channels], + &[commits_submitted], + ) +} + +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +/// This waits for this check to pass on *all* supplied channels +pub fn next_block_and_wait_for_commits( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &[&Arc<Mutex<CoordinatorChannels>>], + commits_submitted: &[&Arc<AtomicU64>], +) -> Result<(), String> { + let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect(); + let blocks_processed_before: Vec<_> = coord_channels + .iter() + .map(|x| { x.lock() .expect("Mutex poisoned") .get_stacks_blocks_processed() }) + .collect(); + let commits_before: Vec<_> = commits_submitted + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + + let mut block_processed_time: Vec<Option<Instant>> = + (0..commits_before.len()).map(|_| None).collect(); + let mut commit_sent_time: Vec<Option<Instant>> = + (0..commits_before.len()).map(|_| None).collect(); next_block_and(btc_controller, timeout_secs, || { - let commits_sent = commits_submitted.load(Ordering::SeqCst); - let blocks_processed = coord_channels .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before && block_processed_time.is_none() { - block_processed_time.replace(now); - } - if commits_sent > commits_before && commit_sent_time.is_none() { - commit_sent_time.replace(now); - } - if blocks_processed > blocks_processed_before { - let block_processed_time = block_processed_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - if commits_sent <= commits_before { - return Ok(false); - } - let commit_sent_time = commit_sent_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - return Ok(true); + for i in 0..commits_submitted.len() { + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let now = Instant::now(); + if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { + block_processed_time[i].replace(now); } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before + 2 { - return Ok(true); + if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { + commit_sent_time[i].replace(now); } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - return Ok(true); + } + + for i in 0..commits_submitted.len() { + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + + if blocks_processed > blocks_processed_before[i] { + let block_processed_time = block_processed_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + if commits_sent <= commits_before[i] { + return Ok(false); + } + let commit_sent_time = commit_sent_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + // try to ensure the commit was sent after the block was processed + if commit_sent_time > block_processed_time { + continue; + } + // if two commits have been sent, one of them must have been after + if commits_sent >= commits_before[i] + 2 { + continue; + } + // otherwise, just timeout if the commit was sent and it's been long enough + // for a new commit pass to have occurred + if block_processed_time.elapsed() > Duration::from_secs(10) { + continue; + } + return Ok(false); + } else { + return Ok(false); + } - Ok(false) - } else { - Ok(false) } + Ok(true) }) }
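With slices for the coordinator channels and commit counters, a multi-miner test can gate a single bitcoin block on every node's progress. A hypothetical two-node wrapper (the function itself is illustrative, using only the types above):

use std::sync::atomic::AtomicU64;
use std::sync::{Arc, Mutex};
use stacks::chainstate::coordinator::comm::CoordinatorChannels;

/// Sketch: mine one bitcoin block and wait until *both* nodes have processed
/// a stacks block and issued a follow-up block commit.
fn next_block_two_miners(
    btc_controller: &mut BitcoinRegtestController,
    chans: [&Arc<Mutex<CoordinatorChannels>>; 2],
    commits: [&Arc<AtomicU64>; 2],
) -> Result<(), String> {
    next_block_and_wait_for_commits(btc_controller, 60, &chans, &commits)
}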
 @@ -529,16 +783,16 @@ pub fn boot_to_epoch_3( blocks_processed: &Arc<AtomicU64>, stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], - self_signing: Option<&TestSigners>, + self_signing: &mut Option<&mut TestSigners>, btc_regtest_controller: &mut BitcoinRegtestController, ) { assert_eq!(stacker_sks.len(), signer_sks.len()); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -547,6 +801,17 @@ // first mined stacks block next_block_and_wait(btc_regtest_controller, &blocks_processed); + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -599,6 +864,11 @@ submit_tx(&http_origin, &stacking_tx); } + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + let prepare_phase_start = btc_regtest_controller .get_burnchain() .pox_constants @@ -803,16 +1073,13 @@ fn signer_vote_if_needed( } } -/// -/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order -/// for pox-4 to activate -/// * `signer_pks` - must be the same size as `stacker_sks` -pub fn boot_to_epoch_3_reward_set( +pub fn setup_epoch_3_reward_set( naka_conf: &Config, blocks_processed: &Arc<AtomicU64>, stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option<u64>, ) { 
assert_eq!(stacker_sks.len(), signer_sks.len()); @@ -828,9 +1095,6 @@ pub fn boot_to_epoch_3_reward_set( ); let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); - let epoch_3_reward_set_calculation_boundary = - epoch_3_reward_cycle_boundary.saturating_sub(prepare_phase_len); - let epoch_3_reward_set_calculation = epoch_3_reward_set_calculation_boundary.wrapping_add(2); // +2 to ensure we are at the second block of the prepare phase let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); @@ -843,15 +1107,14 @@ .get_burnchain() .block_height_to_reward_cycle(block_height) .unwrap(); - let lock_period = 12; - debug!("Test Cycle Info"; - "prepare_phase_len" => {prepare_phase_len}, - "reward_cycle_len" => {reward_cycle_len}, - "block_height" => {block_height}, - "reward_cycle" => {reward_cycle}, - "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, - "epoch_3_reward_set_calculation" => {epoch_3_reward_set_calculation}, - "epoch_3_start_height" => {epoch_3_start_height}, + let lock_period: u128 = num_stacking_cycles.unwrap_or(12_u64).into(); + info!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, + "epoch_3_start_height" => {epoch_3_start_height}, ); for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( @@ -895,15 +1158,133 @@ ); submit_tx(&http_origin, &stacking_tx); } +} + +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_3_reward_set_calculation_boundary( + naka_conf: &Config, + blocks_processed: &Arc<AtomicU64>, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option<u64>, +) { + setup_epoch_3_reward_set( + naka_conf, + blocks_processed, + stacker_sks, + signer_sks, + btc_regtest_controller, + num_stacking_cycles, + ); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .saturating_add(1); + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3_reward_set_calculation_boundary, + &naka_conf, + ); + + info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}.");
+}
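The boundary arithmetic above is easy to misread, so here is a worked example with hypothetical PoX parameters: with reward_cycle_length = 20, prepare_length = 5, and an Epoch 3.0 start height of 241, the cycle boundary is 241 - (241 % 20) = 240, and the reward-set calculation boundary is 240 - 5 + 1 = 236, i.e. the first block of the prepare phase. Factored out for illustration (this helper is not part of the patch):

/// Sketch of the reward-set calculation boundary computed above.
fn reward_set_calculation_boundary(epoch_3_start: u64, cycle_len: u64, prepare_len: u64) -> u64 {
    let cycle_boundary = epoch_3_start.saturating_sub(epoch_3_start % cycle_len);
    cycle_boundary.saturating_sub(prepare_len).saturating_add(1)
}

// e.g. reward_set_calculation_boundary(241, 20, 5) == 236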
 + +/// Boot the node to the Epoch 2.5 start height, mining with the 2.x miner. +pub fn boot_to_epoch_25( + naka_conf: &Config, + blocks_processed: &Arc<AtomicU64>, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + let epoch_25_start_height = epoch_25.start_height; + assert!( + epoch_25_start_height > 0, + "Epoch 2.5 start height must be greater than 0" + ); + // stack enough to activate pox-4 + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + debug!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_25_start_height" => {epoch_25_start_height}, + ); run_until_burnchain_height( btc_regtest_controller, &blocks_processed, - epoch_3_reward_set_calculation, + epoch_25_start_height, &naka_conf, ); + info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); +} + +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_3_reward_set( + naka_conf: &Config, + blocks_processed: &Arc<AtomicU64>, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option<u64>, +) { + boot_to_epoch_3_reward_set_calculation_boundary( + naka_conf, + blocks_processed, + stacker_sks, + signer_sks, + btc_regtest_controller, + num_stacking_cycles, + ); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + info!( + "Bootstrapped to Epoch 3.0 reward set calculation height: {}", + get_chain_info(naka_conf).burn_block_height + ); +} - info!("Bootstrapped to Epoch 3.0 reward set calculation height: {epoch_3_reward_set_calculation}."); +/// Wait for a block commit, without producing a block +fn wait_for_first_naka_block_commit(timeout_secs: u64, naka_commits_submitted: &Arc<AtomicU64>) { + let start = Instant::now(); + while naka_commits_submitted.load(Ordering::SeqCst) < 1 { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for block commit"); + panic!(); + } + thread::sleep(Duration::from_millis(100)); + } } #[test] @@ -921,7 +1302,6 @@ fn simple_neon_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -933,10 +1313,11 @@ fn simple_neon_integration() { let send_amt = 100; let send_fee = 100; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - send_amt + send_fee, + send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), 100000, @@ -962,7 +1343,6 @@ fn simple_neon_integration() { 
let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -977,7 +1357,7 @@ fn simple_neon_integration() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1017,19 +1397,7 @@ fn simple_neon_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 15 nakamoto tenures for _i in 0..15 { @@ -1122,6 +1490,10 @@ fn simple_neon_integration() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { @@ -1160,7 +1532,6 @@ fn mine_multiple_per_tenure_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -1203,7 +1574,6 @@ fn mine_multiple_per_tenure_integration() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -1216,12 +1586,13 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); boot_to_epoch_3( &naka_conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1246,22 +1617,11 @@ fn mine_multiple_per_tenure_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { + debug!("Mining tenure {}", tenure_ix); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -1335,6 +1695,264 @@ fn mine_multiple_per_tenure_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up two nakamoto nodes, both configured to mine. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 15 tenures are mined after 3.0 starts +/// * Each tenure has 6 blocks (the coinbase block and 5 interim blocks) +/// * Both nodes see the same chainstate at the end of the test +fn multiple_miners() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.node.local_peer_seed = vec![1, 1, 1, 1]; + naka_conf.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + let node_2_rpc = 51026; + let node_2_p2p = 51025; + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 15; + let inter_blocks_per_tenure = 6; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut conf_node_2 = naka_conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = vec![2, 2, 2, 2]; + conf_node_2.burnchain.local_mining_public_key = Some( + Keychain::default(conf_node_2.node.seed.clone()) + .get_pub_key() + .to_hex(), + ); + conf_node_2.node.local_peer_seed = vec![2, 2, 2, 2]; + conf_node_2.node.miner = true; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.events_observers.clear(); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); + 
let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), + naka_conf.burnchain.chain_id, + naka_conf.burnchain.peer_version, + ); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain_to_pks( + 201, + &[ + Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + Secp256k1PublicKey::from_hex( + conf_node_2 + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + ], + ); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let run_loop_2_stopper = run_loop.get_termination_switch(); + let Counters { + naka_proposed_blocks: proposals_submitted_2, + .. + } = run_loop_2.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let coord_channel_2 = run_loop_2.coordinator_channels(); + + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer_multinode( + &signers, + &[&naka_conf, &conf_node_2], + vec![proposals_submitted, proposals_submitted_2], + ); + + info!("Neighbors 1"; "neighbors" => ?get_neighbors(&naka_conf)); + info!("Neighbors 2"; "neighbors" => ?get_neighbors(&conf_node_2)); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut 
btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(20, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + let peer_1_height = get_chain_info(&naka_conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height); + assert_eq!(peer_1_height, peer_2_height); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + coord_channel_2 + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_2_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] fn correct_burn_outs() { @@ -1342,7 +1960,6 @@ fn correct_burn_outs() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.pox_reward_length = Some(10); naka_conf.burnchain.pox_prepare_length = Some(3); @@ -1379,6 +1996,8 @@ fn correct_burn_outs() { 100000, ); + let signers = TestSigners::new(vec![sender_signer_sk]); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -1397,7 +2016,6 @@ fn correct_burn_outs() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. 
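The `multiple_miners` test above closes by checking the chain tip against a simple invariant: every tenure contributes one tenure-start block plus `inter_blocks_per_tenure` interim blocks. A worked, std-only version of that bookkeeping, with the pre-3.0 height as an illustrative placeholder:

```rust
/// Expected tip after `tenure_count` tenures is
/// pre-3.0 height + (inter_blocks_per_tenure + 1) * tenure_count.
fn main() {
    let tenure_count: u64 = 15;
    let inter_blocks_per_tenure: u64 = 6;
    let block_height_pre_3_0: u64 = 100; // illustrative placeholder
    let expected_tip = block_height_pre_3_0 + (inter_blocks_per_tenure + 1) * tenure_count;
    assert_eq!(expected_tip, 100 + 7 * 15); // 105 new Nakamoto blocks
    println!("expected tip height: {expected_tip}");
}
```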
@@ -1414,9 +2032,9 @@ fn correct_burn_outs() { let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); @@ -1569,19 +2187,7 @@ fn correct_burn_outs() { ); assert_eq!(stacker_response.stacker_set.rewarded_addresses.len(), 1); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); info!("Bootstrapped to Epoch-3.0 boundary, mining nakamoto blocks"); @@ -1707,7 +2313,6 @@ fn block_proposal_api_endpoint() { return; } - let signers = TestSigners::default(); let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); conf.connection_options.block_proposal_token = Some(password.clone()); @@ -1739,7 +2344,6 @@ fn block_proposal_api_endpoint() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -1748,13 +2352,14 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1779,19 +2384,7 @@ fn block_proposal_api_endpoint() { info!("Nakamoto miner started..."); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 3 nakamoto tenures for _ in 0..3 { @@ -1852,10 +2445,11 @@ fn block_proposal_api_endpoint() { total_burn, tenure_change, coinbase, + 1, ) .expect("Failed to build Nakamoto block"); - let burn_dbconn = btc_regtest_controller.sortdb_ref().index_conn(); + let burn_dbconn = btc_regtest_controller.sortdb_ref().index_handle_at_tip(); let mut miner_tenure_info = builder .load_tenure_info(&mut chainstate, &burn_dbconn, tenure_cause) .unwrap(); @@ -2068,7 +2662,6 @@ fn miner_writes_proposed_block_to_stackerdb() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); let sender_sk = Secp256k1PrivateKey::new(); @@ -2089,6 +2682,8 @@ fn miner_writes_proposed_block_to_stackerdb() { 100000, ); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -2107,7 +2702,6 @@ fn miner_writes_proposed_block_to_stackerdb() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2122,25 +2716,14 @@ fn miner_writes_proposed_block_to_stackerdb() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure next_block_and_mine_commit( @@ -2152,26 +2735,14 @@ fn miner_writes_proposed_block_to_stackerdb() { .unwrap(); let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = - StacksPublicKey::from_private(&naka_conf.get_miner_config().mining_key.unwrap()); - let slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .expect("Unable to get miner slot") - .expect("No miner slot exists"); - - let proposed_block: NakamotoBlock = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = - StackerDBSession::new(&naka_conf.node.rpc_bind, miner_contract_id); - miners_stackerdb - .get_latest(slot_id.start) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found") - }; + + let proposed_block = get_latest_block_proposal(&naka_conf, &sortdb) + .expect("Expected to find a proposed block in the StackerDB") + .0; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); - proposed_zero_block.header.signer_signature = ThresholdSignature::empty(); + proposed_zero_block.header.signer_signature = vec![]; let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); coord_channel @@ -2201,7 +2772,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let signer_bitvec = BitVec::<4000>::consensus_deserialize(&mut signer_bitvec_bytes.as_slice()) .expect("Failed to deserialize signer bitvec"); - assert_eq!(signer_bitvec.len(), 1); + assert_eq!(signer_bitvec.len(), 30); assert_eq!( format!("0x{}", observed_block.block_hash), @@ -2217,13 +2788,14 @@ fn vote_for_aggregate_key_burn_op() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let _http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); @@ -2245,7 +2817,6 @@ fn vote_for_aggregate_key_burn_op() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2263,7 +2834,7 @@ fn vote_for_aggregate_key_burn_op() { &blocks_processed, &[stacker_sk], &[signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2281,19 +2852,8 @@ fn vote_for_aggregate_key_burn_op() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // submit a pre-stx op let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); @@ -2462,13 +3022,13 @@ fn follower_bootup() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -2505,7 +3065,6 @@ fn follower_bootup() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2523,7 +3082,7 @@ fn follower_bootup() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2548,19 +3107,7 @@ fn follower_bootup() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. 
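Just below, `follower_bootup` assembles the follower's configuration by cloning the miner's config, clearing its event observers, rebinding ports, and registering the miner as a bootstrap peer (plus the new `pox_sync_sample_secs` setting). A schematic of that derivation with a stand-in type, not the real stacks-node `Config`:

```rust
/// `NodeConf` is a stand-in for illustration, not the real stacks-node `Config`.
#[derive(Clone, Debug)]
struct NodeConf {
    miner: bool,
    rpc_bind: String,
    p2p_bind: String,
    bootstrap_node: Option<String>,
}

/// Derive a follower from a miner config: same chain settings, mining
/// disabled, fresh ports, and the miner registered as "<pubkey>@<host:port>".
fn derive_follower(miner: &NodeConf, rpc_port: u16, p2p_port: u16, miner_pk_hex: &str) -> NodeConf {
    let mut follower = miner.clone();
    follower.miner = false;
    follower.rpc_bind = format!("127.0.0.1:{rpc_port}");
    follower.p2p_bind = format!("127.0.0.1:{p2p_port}");
    follower.bootstrap_node = Some(format!("{miner_pk_hex}@{}", miner.p2p_bind));
    follower
}

fn main() {
    let miner = NodeConf {
        miner: true,
        rpc_bind: "127.0.0.1:20443".into(),
        p2p_bind: "127.0.0.1:20444".into(),
        bootstrap_node: None,
    };
    let follower = derive_follower(&miner, 30443, 30444, "02deadbeef"); // key is illustrative
    println!("{follower:?}");
}
```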
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut follower_conf = naka_conf.clone(); follower_conf.events_observers.clear(); @@ -2580,6 +3127,7 @@ fn follower_bootup() { follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( @@ -2615,44 +3163,123 @@ fn follower_bootup() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { + debug!("follower_bootup: Miner runs tenure {}", tenure_ix); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); let mut last_tip = BlockHeaderHash([0x00; 32]); - let mut last_tip_height = 0; + let mut last_nonce = None; + + debug!( + "follower_bootup: Miner mines interim blocks for tenure {}", + tenure_ix + ); // mine the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { + for _ in 0..inter_blocks_per_tenure { let blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - // submit a tx so that the miner will mine an extra block - let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("follower_bootup: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; + }; + + let sender_nonce = account + .nonce + .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + last_nonce = Some(sender_nonce); - let info = get_chain_info_result(&naka_conf).unwrap(); - assert_ne!(info.stacks_tip, last_tip); - assert_ne!(info.stacks_tip_height, last_tip_height); + let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - last_tip = info.stacks_tip; - last_tip_height = info.stacks_tip_height; + debug!("follower_bootup: Miner account: {:?}", &account); + debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + + let now = get_epoch_time_secs(); + while get_epoch_time_secs() < now + 10 { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: Could not get miner chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); +
continue; + }; + + if follower_info.burn_block_height < info.burn_block_height { + debug!("follower_bootup: Follower is behind miner's burnchain view"); + thread::sleep(Duration::from_millis(100)); + continue; + } + + if info.stacks_tip == last_tip { + debug!( + "follower_bootup: Miner stacks tip hasn't changed ({})", + &info.stacks_tip + ); + thread::sleep(Duration::from_millis(100)); + continue; + } + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + if blocks_processed > blocks_processed_before { + break; + } + + debug!("follower_bootup: No blocks processed yet"); + thread::sleep(Duration::from_millis(100)); + } + + // compare chain tips + loop { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: failed to load tip info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + if info.stacks_tip == follower_info.stacks_tip { + debug!( + "follower_bootup: Follower has advanced to miner's tip {}", + &info.stacks_tip + ); + } else { + debug!( + "follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", + &info.stacks_tip, follower_info.stacks_tip + ); + } + + last_tip = info.stacks_tip; + break; + } } + debug!("follower_bootup: Wait for next block-commit"); let start_time = Instant::now(); while commits_submitted.load(Ordering::SeqCst) <= commits_before { if start_time.elapsed() >= Duration::from_secs(20) { @@ -2660,6 +3287,7 @@ fn follower_bootup() { } thread::sleep(Duration::from_millis(100)); } + debug!("follower_bootup: Block commit submitted"); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2718,7 +3346,6 @@ fn stack_stx_burn_op_integration_test() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.satoshis_per_byte = 2; naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -2729,6 +3356,8 @@ fn stack_stx_burn_op_integration_test() { let signer_sk_2 = Secp256k1PrivateKey::new(); let signer_addr_2 = tests::to_addr(&signer_sk_2); + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -2751,7 +3380,6 @@ fn stack_stx_burn_op_integration_test() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2769,7 +3397,7 @@ fn stack_stx_burn_op_integration_test() { &blocks_processed, &[stacker_sk], &[signer_sk_1], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2777,19 +3405,8 @@ fn stack_stx_burn_op_integration_test() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let block_height = btc_regtest_controller.get_headers_height(); @@ -3136,3 +3753,3647 @@ fn stack_stx_burn_op_integration_test() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// Miner A mines a regular tenure, its last block being block a_x. +/// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted. +/// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information. +/// This test asserts: +/// * tenure C ignores b_0, and correctly builds off of block a_x. +fn forked_tenure_is_ignored() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_blocks, + naka_skip_commit_op: test_skip_commit_op, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + info!("Starting tenure A."); + wait_for_first_naka_block_commit(60, &commits_submitted); + + // In the next block, the miner should win the tenure and submit a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + let block_tenure_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("Starting tenure B."); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }) + .unwrap(); + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + info!("Commit op is submitted; unpause tenure B's block"); + + // Unpause the broadcast of Tenure B's block, do not submit commits. + test_skip_commit_op.0.lock().unwrap().replace(true); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for a stacks block to be broadcasted + let start_time = Instant::now(); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure B broadcasted a block. 
Issue the next bitcoin block and unstall block commits."); + let block_tenure_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_b = blocks.last().unwrap(); + + info!("Starting tenure C."); + // Submit a block commit op for tenure C + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + test_skip_commit_op.0.lock().unwrap().replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + info!("Tenure C produced a block!"); + let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_c = blocks.last().unwrap(); + + // Now let's produce a second block for tenure C and ensure it builds off of block C. + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in Tenure C to mine a second block"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure C produced a second block!"); + + let block_2_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_2_c = blocks.last().unwrap(); + + info!("Starting tenure D."); + // Submit a block commit op for tenure D and mine a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }) + .unwrap(); + + let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_d = blocks.last().unwrap(); + assert_ne!(block_tenure_b, block_tenure_a); + assert_ne!(block_tenure_b, block_tenure_c); + assert_ne!(block_tenure_c, block_tenure_a); + + // Block B was built atop block A + assert_eq!( + block_tenure_b.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); + assert_eq!( + block_b.parent_block_id, + block_tenure_a.index_block_hash().to_string() + ); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + block_tenure_c.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); + assert_eq!( + block_c.parent_block_id, + block_tenure_a.index_block_hash().to_string() + ); + + assert_ne!(block_tenure_c, block_2_tenure_c); + assert_ne!(block_2_tenure_c, block_tenure_d); +
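The assertions on either side of this point pin down the fork shape the test expects: tenure B's block never reaches the network, so tenure C re-forks from A's tip, and tenure D then extends C. The same shape in a compact, std-only sketch with illustrative heights:

```rust
/// Stand-in model of the chain shape `forked_tenure_is_ignored` asserts.
fn main() {
    let a_x = 10u64;      // last block of tenure A (illustrative height)
    let b_0 = a_x + 1;    // tenure B's block: built but never broadcast
    let c_0 = a_x + 1;    // tenure C ignores b_0 and also builds on a_x
    let c_1 = c_0 + 1;    // second block in tenure C
    let d_0 = c_1 + 1;    // tenure D extends tenure C's chain
    assert_eq!(b_0, c_0); // same height, competing forks
    assert_eq!(d_0, a_x + 3); // canonical chain: a_x -> c_0 -> c_1 -> d_0
    println!("fork resolved at height {d_0}");
}
```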
assert_ne!(block_tenure_c, block_tenure_d); + + // Second block of tenure C builds off of block C + assert_eq!( + block_2_tenure_c.stacks_block_height, + block_tenure_c.stacks_block_height + 1, + ); + assert_eq!( + block_2_c.parent_block_id, + block_tenure_c.index_block_hash().to_string() + ); + + // Tenure D builds off of the second block of tenure C + assert_eq!( + block_tenure_d.stacks_block_height, + block_2_tenure_c.stacks_block_height + 1, + ); + assert_eq!( + block_d.parent_block_id, + block_2_tenure_c.index_block_hash().to_string() + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 5 tenures are mined after 3.0 starts +/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) +/// * Verifies the block heights of the blocks mined +fn check_block_heights() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = + "(define-read-only (get-heights) { burn-block-height: burn-block-height, block-height: block-height })"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let heights0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-heights", + vec![], + ); + let preheights = heights0_value.expect_tuple().unwrap(); + info!("Heights from pre-epoch 3.0: {}", preheights); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let mut last_burn_block_height; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; + + let heights0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-heights", + vec![], + ); + let heights0 = heights0_value.expect_tuple().unwrap(); + info!("Heights from epoch 3.0 start: {}", heights0); + assert_eq!( + heights0.get("burn-block-height"), + preheights.get("burn-block-height"), + "Burn block height should match" + ); + assert_eq!( + heights0 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(), + last_stacks_block_height, + "Stacks block height should match" + ); + + // This version uses the Clarity 1 / 2 keywords + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 keywords + let contract3_name = "test-contract-3"; + let contract_clarity3 = + "(define-read-only (get-heights) { burn-block-height: burn-block-height, stacks-block-height: stacks-block-height, tenure-height: tenure-height })"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + let commits_before = 
commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let heights1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-heights", + vec![], + ); + let heights1 = heights1_value.expect_tuple().unwrap(); + info!("Heights from Clarity 1: {}", heights1); + + let heights3_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-heights", + vec![], + ); + let heights3 = heights3_value.expect_tuple().unwrap(); + info!("Heights from Clarity 3: {}", heights3); + + let bbh1 = heights1 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bbh3 = heights3 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!(bbh1, bbh3, "Burn block heights should match"); + last_burn_block_height = bbh1; + + let bh1 = heights1 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bh3 = heights3 + .get("tenure-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + bh1, bh3, + "Clarity 2 block-height should match Clarity 3 tenure-height" + ); + assert_eq!( + bh1, + last_tenure_height + 1, + "Tenure height should have incremented" + ); + last_tenure_height = bh1; + + let sbh = heights3 + .get("stacks-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + sbh, + last_stacks_block_height + 1, + "Stacks block heights should have incremented" + ); + last_stacks_block_height = sbh; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let heights1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-heights", + vec![], + ); + let heights1 = heights1_value.expect_tuple().unwrap(); + info!("Heights from Clarity 1: {}", heights1); + + let heights3_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-heights", + vec![], + ); + let heights3 = heights3_value.expect_tuple().unwrap(); + info!("Heights from Clarity 3: {}", heights3); + + let bbh1 = heights1 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bbh3 = heights3 + .get("burn-block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!(bbh1, bbh3, "Burn block heights should match"); + assert_eq!( + bbh1, last_burn_block_height, + "Burn block heights should not have incremented" + ); + + let bh1 = heights1 + .get("block-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + let bh3 = heights3 + .get("tenure-height") + .unwrap() + .clone() + .expect_u128() + .unwrap(); + assert_eq!( + bh1, bh3, + "Clarity 2 block-height should match Clarity 3 tenure-height" + ); + assert_eq!( + bh1, last_tenure_height, + "Tenure height should not have changed" + ); + + let sbh = heights3 + .get("stacks-block-height") + .unwrap() + 
.clone() + .expect_u128() + .unwrap(); + assert_eq!( + sbh, + last_stacks_block_height + 1, + "Stacks block heights should have incremented" + ); + last_stacks_block_height = sbh; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +/// Test config parameter `nakamoto_attempt_time_ms` +#[test] +#[ignore] +fn nakamoto_attempt_time() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + naka_conf.connection_options.block_proposal_token = Some(password.clone()); + // Use fixed timing params for this test + let nakamoto_attempt_time_ms = 20_000; + naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; + let stacker_sk = setup_stacker(&mut naka_conf); + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 1_000_000_000, + ); + + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100_000, + ); + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + // We'll need a lot of accounts for one subtest to avoid MAXIMUM_MEMPOOL_TX_CHAINING + struct Account { + nonce: u64, + privk: Secp256k1PrivateKey, + _address: StacksAddress, + } + let num_accounts = 1_000; + let init_account_balance = 1_000_000_000; + let account_keys = add_initial_balances(&mut naka_conf, num_accounts, init_account_balance); + let mut account = account_keys + .into_iter() + .map(|privk| { + let _address = tests::to_addr(&privk); + Account { + nonce: 0, + privk, + _address, + } + }) + .collect::<Vec<_>>(); + + // only subscribe to the block proposal events + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::BlockProposal], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); +
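The 1,000 pre-funded accounts above exist because one subtest needs to queue more transfers than a single account may chain in the mempool (`MAXIMUM_MEMPOOL_TX_CHAINING`). A std-only sketch of the nonce fan-out; the cap of 25 is an assumption here, not taken from this patch:

```rust
/// Assumed per-account cap on unconfirmed chained txs (illustrative).
const MAX_CHAINING: u64 = 25;

fn main() {
    let tx_limit = 10_000u64;
    let mut nonces = vec![0u64; 1_000]; // one entry per pre-funded account
    let mut tx_count = 0u64;
    // rotate through accounts, chaining up to MAX_CHAINING txs on each
    'submit: for nonce in nonces.iter_mut() {
        for _ in 0..MAX_CHAINING {
            // a real test would build and submit a transfer with this nonce
            *nonce += 1;
            tx_count += 1;
            if tx_count >= tx_limit {
                break 'submit;
            }
        }
    }
    // 10_000 txs need ceil(10_000 / 25) = 400 accounts
    assert_eq!(nonces.iter().filter(|n| **n > 0).count(), 400);
    println!("queued {tx_count} txs");
}
```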
btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let _block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 3 nakamoto tenures + for _ in 0..3 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + // ----- Setup boilerplate finished, test block proposal API endpoint ----- + + let tenure_count = 2; + let inter_blocks_per_tenure = 3; + + info!("Begin subtest 1"); + + // Subtest 1 + // Mine nakamoto tenures with a few transactions + // Blocks should be produced at least every 20 seconds + for _ in 0..tenure_count { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for tenure_count in 0..inter_blocks_per_tenure { + debug!("nakamoto_attempt_time: begin tenure {}", tenure_count); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let txs_per_block = 3; + let tx_fee = 500; + let amount = 500; + + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("nakamoto_attempt_time: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; + }; + + let mut sender_nonce = account.nonce; + for _ in 0..txs_per_block { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + } + + // Miner should have made a new block by now + let wait_start = Instant::now(); + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + // wait a little longer than what the max block time should be + if wait_start.elapsed() > Duration::from_millis(nakamoto_attempt_time_ms + 100) { + panic!( + "A block should have been produced within 
{nakamoto_attempt_time_ms} ms" + ); + } + thread::sleep(Duration::from_secs(1)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + info!("Begin subtest 2"); + + // Subtest 2 + // Confirm that no blocks are mined if there are no transactions + for _ in 0..2 { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + // Wait long enough for a block to be mined + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2)); + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + + // Assert that no block was mined while waiting + assert_eq!(blocks_processed, blocks_processed_before); + assert_eq!(info.stacks_tip, info_before.stacks_tip); + assert_eq!(info.stacks_tip_height, info_before.stacks_tip_height); + } + + info!("Begin subtest 3"); + + // Subtest 3 + // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool + // Multiple blocks should be mined + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let tx_limit = 10000; + let tx_fee = 500; + let amount = 500; + let mut tx_total_size = 0; + let mut tx_count = 0; + let mut acct_idx = 0; + + // Submit max # of txs from each account to reach tx_limit + 'submit_txs: loop { + let acct = &mut account[acct_idx]; + for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { + let transfer_tx = + make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + submit_tx(&http_origin, &transfer_tx); + tx_total_size += transfer_tx.len(); + tx_count += 1; + acct.nonce += 1; + if tx_count >= tx_limit { + break 'submit_txs; + } + info!( + "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})", + acct_idx, tx_count, tx_limit + ); + } + acct_idx += 1; + } + + info!("Subtest 3 sent all transactions"); + + // Make sure that these transactions *could* fit into a single block + assert!(tx_total_size < MAX_BLOCK_LEN as usize); + + // Wait long enough for 2 blocks to be made + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); + + // Check that 2 blocks were made + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let blocks_mined = blocks_processed - blocks_processed_before; + assert!(blocks_mined > 2); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, info_before.stacks_tip); + assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); + + // ----- Clean up ----- + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test is testing the burn state of the Stacks blocks. 
In Stacks 2.x, +/// the burn block state accessed in a Clarity contract is the burn block of +/// the block's parent, since the block is built before its burn block is +/// mined. In Nakamoto, there is no longer this race condition, so Clarity +/// contracts access the state of the current burn block. +/// We should verify: +/// - `burn-block-height` in epoch 3.x is the burn block of the Stacks block +/// - `get-burn-block-info` is able to access info of the current burn block +/// in epoch 3.x +fn clarity_burn_state() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let tx_fee = 1000; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::MinedBlocks], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let mut sender_nonce = 0; + + // This version uses the Clarity 1 / 2 keywords + let contract_name = "test-contract"; + let contract = r#" + (define-read-only (foo (expected-height uint)) + (begin + (asserts! (is-eq expected-height burn-block-height) (err burn-block-height)) + (asserts! (is-some (get-burn-block-info? 
header-hash burn-block-height)) (err u0)) + (ok true) + ) + ) + (define-public (bar (expected-height uint)) + (foo expected-height) + ) + "#; + + let contract_tx = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract_name, + contract, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx); + + let mut burn_block_height = 0; + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + + // Don't submit this tx on the first iteration, because the contract is not published yet. + if tenure_ix > 0 { + // Call the read-only function and see if we see the correct burn block height + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "foo", + vec![&Value::UInt(burn_block_height)], + ); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[Value::UInt(burn_block_height + 1)], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + } + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info(&naka_conf); + burn_block_height = info.burn_block_height as u128; + info!("Expecting burn block height to be {}", burn_block_height); + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. }) => { + // Ignore coinbase and tenure transactions + if *fee == 0 { + return; + } + + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // Call the read-only function and see if we see the correct burn block height + let expected_height = Value::UInt(burn_block_height); + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "foo", + vec![&expected_height], + ); + info!("Read-only result: {:?}", result); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx to trigger the next block + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[expected_height], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, .. 
}) => { + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +fn signer_chainstate() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 200; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * 20, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+    } = run_loop.counters();
+
+    let coord_channel = run_loop.coordinator_channels();
+
+    let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
+    wait_for_runloop(&blocks_processed);
+    boot_to_epoch_3(
+        &naka_conf,
+        &blocks_processed,
+        &[stacker_sk],
+        &[sender_signer_sk],
+        &mut Some(&mut signers),
+        &mut btc_regtest_controller,
+    );
+
+    info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner");
+
+    let burnchain = naka_conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    // query for prometheus metrics
+    #[cfg(feature = "monitoring_prom")]
+    {
+        let (chainstate, _) = StacksChainState::open(
+            naka_conf.is_mainnet(),
+            naka_conf.burnchain.chain_id,
+            &naka_conf.get_chainstate_path_str(),
+            None,
+        )
+        .unwrap();
+        let block_height_pre_3_0 =
+            NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
+                .unwrap()
+                .unwrap()
+                .stacks_block_height;
+        let prom_http_origin = format!("http://{}", prom_bind);
+        let client = reqwest::blocking::Client::new();
+        let res = client
+            .get(&prom_http_origin)
+            .send()
+            .unwrap()
+            .text()
+            .unwrap();
+        let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}");
+        assert!(res.contains(&expected_result));
+    }
+
+    info!("Nakamoto miner started...");
+    blind_signer(&naka_conf, &signers, proposals_submitted.clone());
+
+    let socket = naka_conf
+        .node
+        .rpc_bind
+        .to_socket_addrs()
+        .unwrap()
+        .next()
+        .unwrap();
+    let signer_client = stacks_signer::client::StacksClient::new(
+        StacksPrivateKey::from_seed(&[0, 1, 2, 3]),
+        socket,
+        naka_conf
+            .connection_options
+            .block_proposal_token
+            .clone()
+            .unwrap_or("".into()),
+        false,
+    );
+
+    wait_for_first_naka_block_commit(60, &commits_submitted);
+
+    let mut signer_db =
+        SignerDb::new(format!("{}/signer_db_path", naka_conf.node.working_dir)).unwrap();
+
+    // Mine some nakamoto tenures
+    //  track the last tenure's first block and subsequent blocks so we can
+    //  check that they get rejected by the sortitions_view
+    let mut last_tenures_proposals: Option<(StacksPublicKey, NakamotoBlock, Vec<NakamotoBlock>)> =
+        None;
+    // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals
+    let mut first_tenure_blocks: Option<Vec<NakamotoBlock>> = None;
+    for i in 0..15 {
+        next_block_and_mine_commit(
+            &mut btc_regtest_controller,
+            60,
+            &coord_channel,
+            &commits_submitted,
+        )
+        .unwrap();
+
+        // this config disallows any reorg due to poorly timed block commits
+        let proposal_conf = ProposalEvalConfig {
+            first_proposal_burn_block_timing: Duration::from_secs(0),
+            block_proposal_timeout: Duration::from_secs(100),
+        };
+        let mut sortitions_view =
+            SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap();
+
+        // check the prior tenure's proposals again, confirming that the
+        // sortitions_view will reject them.
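+        // (a proposal that extends the prior tenure would reorg the blocks
+        // already signed in the current tenure, which this view disallows)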
+ if let Some((ref miner_pk, ref prior_tenure_first, ref prior_tenure_interims)) = + last_tenures_proposals + { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, prior_tenure_first, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + for block in prior_tenure_interims.iter() { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, block, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + } + } + + // make sure we're getting a proposal from the current sortition (not 100% guaranteed by + // `next_block_and_mine_commit`) by looping + let time_start = Instant::now(); + let proposal = loop { + let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + break proposal; + } + if time_start.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for block proposal from the current bitcoin block"); + } + thread::sleep(Duration::from_secs(1)); + }; + + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); + signer_db + .insert_block(&BlockInfo { + block: proposal.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + signed_over: true, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, + ext: ExtraBlockInfo::None, + }) + .unwrap(); + + let before = proposals_submitted.load(Ordering::SeqCst); + + // submit a tx to trigger an intermediate block + let sender_nonce = i; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + let timer = Instant::now(); + while proposals_submitted.load(Ordering::SeqCst) <= before { + thread::sleep(Duration::from_millis(5)); + if timer.elapsed() > Duration::from_secs(30) { + panic!("Timed out waiting for nakamoto miner to produce intermediate block"); + } + } + + // an intermediate block was produced. 
check the proposed block + let proposal_interim = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + // force the view to refresh and check again + + // this config disallows any reorg due to poorly timed block commits + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + }; + let mut sortitions_view = + SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + + signer_db + .insert_block(&BlockInfo { + block: proposal_interim.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + signed_over: true, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, + ext: ExtraBlockInfo::None, + }) + .unwrap(); + + if first_tenure_blocks.is_none() { + first_tenure_blocks = Some(vec![proposal.0.clone(), proposal_interim.0.clone()]); + } + last_tenures_proposals = Some((proposal.1, proposal.0, vec![proposal_interim.0])); + } + + // now we'll check some specific cases of invalid proposals + // Case: the block doesn't confirm the prior blocks that have been signed. + let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); + let last_tenure_header = &last_tenure.header; + let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: last_tenure_header.chain_length, + burn_spent: last_tenure_header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: last_tenure_header.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + timestamp: last_tenure_header.timestamp + 1, + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + pox_treatment: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header, + txs: vec![], + }; + + // this config disallows any reorg due to poorly timed block commits + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + }; + let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." 
+    );
+
+    // Case: the block contains a tenure change, but blocks have already
+    // been signed in this tenure
+    let mut sibling_block_header = NakamotoBlockHeader {
+        version: 1,
+        chain_length: last_tenure_header.chain_length,
+        burn_spent: last_tenure_header.burn_spent,
+        consensus_hash: last_tenure_header.consensus_hash.clone(),
+        parent_block_id: last_tenure_header.parent_block_id.clone(),
+        tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]),
+        state_index_root: TrieHash([0; 32]),
+        timestamp: last_tenure_header.timestamp + 1,
+        miner_signature: MessageSignature([0; 65]),
+        signer_signature: Vec::new(),
+        pox_treatment: BitVec::ones(1).unwrap(),
+    };
+    sibling_block_header.sign_miner(&miner_sk).unwrap();
+
+    let sibling_block = NakamotoBlock {
+        header: sibling_block_header,
+        txs: vec![
+            StacksTransaction {
+                version: TransactionVersion::Testnet,
+                chain_id: 1,
+                auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig(
+                    SinglesigSpendingCondition {
+                        hash_mode: SinglesigHashMode::P2PKH,
+                        signer: Hash160([0; 20]),
+                        nonce: 0,
+                        tx_fee: 0,
+                        key_encoding: TransactionPublicKeyEncoding::Compressed,
+                        signature: MessageSignature([0; 65]),
+                    },
+                )),
+                anchor_mode: TransactionAnchorMode::Any,
+                post_condition_mode: TransactionPostConditionMode::Allow,
+                post_conditions: vec![],
+                payload: TransactionPayload::TenureChange(
+                    last_tenure.get_tenure_change_tx_payload().unwrap().clone(),
+                ),
+            },
+            last_tenure.txs[1].clone(),
+        ],
+    };
+
+    assert!(
+        !sortitions_view
+            .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk)
+            .unwrap(),
+        "A tenure-change block in a tenure that has already signed blocks must be rejected."
+    );
+
+    // Case: the block contains a tenure change, but it doesn't confirm all the blocks of the parent tenure
+    let reorg_to_block = first_tenure_blocks.as_ref().unwrap().first().unwrap();
+    let mut sibling_block_header = NakamotoBlockHeader {
+        version: 1,
+        chain_length: reorg_to_block.header.chain_length + 1,
+        burn_spent: reorg_to_block.header.burn_spent,
+        consensus_hash: last_tenure_header.consensus_hash.clone(),
+        parent_block_id: reorg_to_block.block_id(),
+        tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]),
+        state_index_root: TrieHash([0; 32]),
+        timestamp: last_tenure_header.timestamp + 1,
+        miner_signature: MessageSignature([0; 65]),
+        signer_signature: Vec::new(),
+        pox_treatment: BitVec::ones(1).unwrap(),
+    };
+    sibling_block_header.sign_miner(&miner_sk).unwrap();
+
+    let sibling_block = NakamotoBlock {
+        header: sibling_block_header.clone(),
+        txs: vec![
+            StacksTransaction {
+                version: TransactionVersion::Testnet,
+                chain_id: 1,
+                auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig(
+                    SinglesigSpendingCondition {
+                        hash_mode: SinglesigHashMode::P2PKH,
+                        signer: Hash160([0; 20]),
+                        nonce: 0,
+                        tx_fee: 0,
+                        key_encoding: TransactionPublicKeyEncoding::Compressed,
+                        signature: MessageSignature([0; 65]),
+                    },
+                )),
+                anchor_mode: TransactionAnchorMode::Any,
+                post_condition_mode: TransactionPostConditionMode::Allow,
+                post_conditions: vec![],
+                payload: TransactionPayload::TenureChange(TenureChangePayload {
+                    tenure_consensus_hash: sibling_block_header.consensus_hash.clone(),
+                    prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(),
+                    burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(),
+                    previous_tenure_end: reorg_to_block.block_id(),
+                    previous_tenure_blocks: 1,
+                    cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound,
+                    pubkey_hash: Hash160::from_node_public_key(&miner_pk),
+                }),
+            },
+            last_tenure.txs[1].clone(),
+        ],
+    };
+
+    assert!(
+        !sortitions_view
+            .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk)
+            .unwrap(),
+        "A tenure-change block that does not confirm all blocks of the parent tenure must be rejected."
+    );
+
+    // Case: the block contains a tenure change, but the parent tenure is a reorg
+    let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap();
+    // make the sortition_view *think* that our block commit pointed at this old tenure
+    sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone();
+    let mut sibling_block_header = NakamotoBlockHeader {
+        version: 1,
+        chain_length: reorg_to_block.header.chain_length + 1,
+        burn_spent: reorg_to_block.header.burn_spent,
+        consensus_hash: last_tenure_header.consensus_hash.clone(),
+        parent_block_id: reorg_to_block.block_id(),
+        tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]),
+        state_index_root: TrieHash([0; 32]),
+        timestamp: reorg_to_block.header.timestamp + 1,
+        miner_signature: MessageSignature([0; 65]),
+        signer_signature: Vec::new(),
+        pox_treatment: BitVec::ones(1).unwrap(),
+    };
+    sibling_block_header.sign_miner(&miner_sk).unwrap();
+
+    let sibling_block = NakamotoBlock {
+        header: sibling_block_header.clone(),
+        txs: vec![
+            StacksTransaction {
+                version: TransactionVersion::Testnet,
+                chain_id: 1,
+                auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig(
+                    SinglesigSpendingCondition {
+                        hash_mode: SinglesigHashMode::P2PKH,
+                        signer: Hash160([0; 20]),
+                        nonce: 0,
+                        tx_fee: 0,
+                        key_encoding: TransactionPublicKeyEncoding::Compressed,
+                        signature: MessageSignature([0; 65]),
+                    },
+                )),
+                anchor_mode: TransactionAnchorMode::Any,
+                post_condition_mode: TransactionPostConditionMode::Allow,
+                post_conditions: vec![],
+                payload: TransactionPayload::TenureChange(TenureChangePayload {
+                    tenure_consensus_hash: sibling_block_header.consensus_hash.clone(),
+                    prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(),
+                    burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(),
+                    previous_tenure_end: reorg_to_block.block_id(),
+                    previous_tenure_blocks: 1,
+                    cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound,
+                    pubkey_hash: Hash160::from_node_public_key(&miner_pk),
+                }),
+            },
+            last_tenure.txs[1].clone(),
+        ],
+    };
+
+    assert!(
+        !sortitions_view
+            .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk)
+            .unwrap(),
+        "A tenure-change block whose parent tenure reorgs previously signed blocks must be rejected."
+    );
+
+    let start_sortition = &reorg_to_block.header.consensus_hash;
+    let stop_sortition = &sortitions_view.cur_sortition.prior_sortition;
+    // check that the get_tenure_forking_info response is sane
+    let fork_info = signer_client
+        .get_tenure_forking_info(start_sortition, stop_sortition)
+        .unwrap();
+
+    // it should start and stop with the given inputs (reversed!)
+    assert_eq!(fork_info.first().unwrap().consensus_hash, *stop_sortition);
+    assert_eq!(fork_info.last().unwrap().consensus_hash, *start_sortition);
+
+    // every step of the return should be linked to the parent
+    let mut prior: Option<&TenureForkingInfo> = None;
+    for step in fork_info.iter().rev() {
+        if let Some(ref prior) = prior {
+            assert_eq!(prior.sortition_id, step.parent_sortition_id);
+        }
+        prior = Some(step);
+    }
+
+    // view is stale, if we ever expand this test, sortitions_view should
+    // be fetched again, so drop it here.
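+    // (a fresh view would be obtained with another
+    // SortitionsView::fetch_view(proposal_conf, &signer_client) call)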
+    drop(sortitions_view);
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}
+
+#[test]
+#[ignore]
+/// This test spins up a nakamoto-neon node.
+/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches
+/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The `BootRunLoop`
+/// struct handles the epoch-2/3 tear-down and spin-up. It mines a regular Nakamoto tenure
+/// before pausing the commit op to produce an empty sortition, forcing a tenure extend.
+/// Commit ops are resumed, and an additional 15 nakamoto tenures are mined.
+/// This test makes four assertions:
+/// * 15 blocks are mined after 3.0 starts.
+/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0
+/// * A tenure extend transaction was successfully mined in 3.0
+/// * The final chain tip is a nakamoto block
+fn continue_tenure_extend() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let mut signers = TestSigners::default();
+    let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
+    let prom_bind = format!("{}:{}", "127.0.0.1", 6000);
+    naka_conf.node.prometheus_bind = Some(prom_bind.clone());
+    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000);
+    let sender_sk = Secp256k1PrivateKey::new();
+    // setup sender + recipient for a test stx transfer
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 1000;
+    let send_fee = 100;
+    naka_conf.add_initial_balance(
+        PrincipalData::from(sender_addr.clone()).to_string(),
+        send_amt * 2 + send_fee,
+    );
+    let sender_signer_sk = Secp256k1PrivateKey::new();
+    let sender_signer_addr = tests::to_addr(&sender_signer_sk);
+    naka_conf.add_initial_balance(
+        PrincipalData::from(sender_signer_addr.clone()).to_string(),
+        100000,
+    );
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let stacker_sk = setup_stacker(&mut naka_conf);
+
+    test_observer::spawn();
+    let observer_port = test_observer::EVENT_OBSERVER_PORT;
+    naka_conf.events_observers.insert(EventObserverConfig {
+        endpoint: format!("localhost:{observer_port}"),
+        events_keys: vec![EventKeyType::AnyEvent],
+    });
+
+    let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let Counters {
+        blocks_processed,
+        naka_submitted_commits: commits_submitted,
+        naka_proposed_blocks: proposals_submitted,
+        naka_skip_commit_op: test_skip_commit_op,
+        ..
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine a regular nakamoto tenure + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + info!("Pausing commit ops to trigger a tenure extend."); + test_skip_commit_op.0.lock().unwrap().replace(true); + + next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + info!("Resuming commit ops to mine regular tenures."); + test_skip_commit_op.0.lock().unwrap().replace(false); + + // Mine 15 more regular nakamoto tenures + for _i in 0..15 { + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex 
poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before && blocks_processed > blocks_processed_before) + }) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + // assert that the tenure extend tx was observed + let mut tenure_extends = vec![]; + let mut tenure_block_founds = vec![]; + let mut transfer_tx_included = false; + for block in test_observer::get_blocks() { + for tx in block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == &transfer_tx_hex { + transfer_tx_included = true; + continue; + } + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => match payload.cause { + TenureChangeCause::Extended => tenure_extends.push(parsed), + TenureChangeCause::BlockFound => tenure_block_founds.push(parsed), + }, + _ => {} + }; + } + } + assert!( + !tenure_extends.is_empty(), + "Nakamoto node failed to include the tenure extend txs" + ); + + assert!( + tenure_block_founds.len() >= 17 - tenure_extends.len(), + "Nakamoto node failed to include the block found tx per winning sortition" + ); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 17); + + // make sure prometheus returns an updated height + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + assert!(res.contains(&expected_result)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// Verify the timestamps using `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. 
+fn check_block_times() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * 2, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = + "(define-read-only (get-time (height uint)) (get-block-info? 
time height))"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(1)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + info!("Time from pre-epoch 3.0: {}", time0); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // This version uses the Clarity 1 / 2 function + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 functions + let contract3_name = "test-contract-3"; + let contract_clarity3 = + "(define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) + (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height))"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + let last_tenure_height = last_stacks_block_height as u128; + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time1 = time1_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0, time1, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + + let time3_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(last_tenure_height - 1)], + ); + let time3_tenure = time3_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + + let time3_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time3_block = time3_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = 
coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let time0a_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time0a = time0a_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert!( + time0a - time0 >= 1, + "get-block-info? time should have changed" + ); + + let time1a_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time1a = time1a_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0a, time1a, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + + let time3a_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time3a_block = time3a_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert!( + time3a_block - time3_block >= 1, + "get-stacks-block-info? time should have changed" + ); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let time0b_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height)], + ); + let time0b = time0b_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0a, time0b, + "get-block-info? 
time should not have changed"
+    );
+
+    let time1b_value = call_read_only(
+        &naka_conf,
+        &sender_addr,
+        contract1_name,
+        "get-time",
+        vec![&clarity::vm::Value::UInt(last_stacks_block_height)],
+    );
+    let time1b = time1b_value
+        .expect_optional()
+        .unwrap()
+        .unwrap()
+        .expect_u128()
+        .unwrap();
+    assert_eq!(
+        time0b, time1b,
+        "Time from pre- and post-epoch 3.0 contracts should match"
+    );
+
+    let time3b_block_value = call_read_only(
+        &naka_conf,
+        &sender_addr,
+        contract3_name,
+        "get-block-time",
+        vec![&clarity::vm::Value::UInt(last_stacks_block_height)],
+    );
+    let time3b_block = time3b_block_value
+        .expect_optional()
+        .unwrap()
+        .unwrap()
+        .expect_u128()
+        .unwrap();
+
+    assert!(
+        time3b_block - time3a_block >= 1,
+        "get-stacks-block-info? time should have changed"
+    );
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}
+
+fn assert_block_info(
+    tuple0: &BTreeMap<ClarityName, Value>,
+    miner: &Value,
+    miner_spend: &clarity::vm::Value,
+) {
+    assert!(tuple0
+        .get("burnchain-header-hash")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_some());
+    assert!(tuple0
+        .get("id-header-hash")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_some());
+    assert!(tuple0
+        .get("header-hash")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_some());
+    assert_eq!(
+        &tuple0
+            .get("miner-address")
+            .unwrap()
+            .clone()
+            .expect_optional()
+            .unwrap()
+            .unwrap(),
+        miner
+    );
+    assert!(tuple0
+        .get("time")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_some());
+    assert!(tuple0
+        .get("vrf-seed")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_some());
+    assert!(tuple0
+        .get("block-reward")
+        .unwrap()
+        .clone()
+        .expect_optional()
+        .unwrap()
+        .is_none()); // not yet mature
+    assert_eq!(
+        &tuple0
+            .get("miner-spend-total")
+            .unwrap()
+            .clone()
+            .expect_optional()
+            .unwrap()
+            .unwrap(),
+        miner_spend
+    );
+    assert_eq!(
+        &tuple0
+            .get("miner-spend-winner")
+            .unwrap()
+            .clone()
+            .expect_optional()
+            .unwrap()
+            .unwrap(),
+        miner_spend
+    );
+}
+
+#[test]
+#[ignore]
+/// Verify all properties in `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`.
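+/// The Clarity 1 / 2 contracts expose every property through `get-block-info?`,
+/// while the Clarity 3 contract splits the same properties between
+/// `get-stacks-block-info?` (per-block data) and `get-tenure-info?` (per-tenure
+/// data); the test cross-checks the tuples returned by each version.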
+fn check_block_info() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * 2, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + let miner = clarity::vm::Value::Principal( + PrincipalData::parse_standard_principal("ST25WA53N4PWF8XZGQH2J5A4CGCWV4JADPM8MHTRV") + .unwrap() + .into(), + ); + let miner_spend = clarity::vm::Value::UInt(20000); + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = "(define-read-only (get-info (height uint)) + { + burnchain-header-hash: (get-block-info? burnchain-header-hash height), + id-header-hash: (get-block-info? id-header-hash height), + header-hash: (get-block-info? header-hash height), + miner-address: (get-block-info? miner-address height), + time: (get-block-info? time height), + vrf-seed: (get-block-info? vrf-seed height), + block-reward: (get-block-info? block-reward height), + miner-spend-total: (get-block-info? miner-spend-total height), + miner-spend-winner: (get-block-info? 
miner-spend-winner height), + } + )"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + info!("Info from pre-epoch 3.0: {:?}", tuple0); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // This version uses the Clarity 1 / 2 function + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 functions + let contract3_name = "test-contract-3"; + let contract_clarity3 = "(define-read-only (get-block-info (height uint)) + { + id-header-hash: (get-stacks-block-info? id-header-hash height), + header-hash: (get-stacks-block-info? header-hash height), + time: (get-stacks-block-info? time height), + } + ) + (define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? 
miner-spend-winner height), + } + )"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, &miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure0.get("burnchain-header-hash"), + tuple0.get("burnchain-header-hash") + ); + assert_eq!( + tuple3_tenure0.get("miner-address"), + tuple0.get("miner-address") + ); + assert_eq!(tuple3_tenure0.get("time"), tuple0.get("time")); + assert_eq!(tuple3_tenure0.get("vrf-seed"), tuple0.get("vrf-seed")); + assert_eq!( + tuple3_tenure0.get("block-reward"), + tuple0.get("block-reward") + ); + assert_eq!( + tuple3_tenure0.get("miner-spend-total"), + tuple0.get("miner-spend-total") + ); + assert_eq!( + tuple3_tenure0.get("miner-spend-winner"), + tuple0.get("miner-spend-winner") + ); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_block1.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block1 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, 
&miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure1 = result3_tenure.expect_tuple().unwrap().data_map; + // There should have been a tenure change, so these should be different. + assert_ne!(tuple3_tenure0, tuple3_tenure1); + assert_eq!( + tuple3_tenure1.get("burnchain-header-hash"), + tuple0.get("burnchain-header-hash") + ); + assert_eq!( + tuple3_tenure1.get("miner-address"), + tuple0.get("miner-address") + ); + assert_eq!(tuple3_tenure1.get("time"), tuple0.get("time")); + assert_eq!(tuple3_tenure1.get("vrf-seed"), tuple0.get("vrf-seed")); + assert_eq!( + tuple3_tenure1.get("block-reward"), + tuple0.get("block-reward") + ); + assert_eq!( + tuple3_tenure1.get("miner-spend-total"), + tuple0.get("miner-spend-total") + ); + assert_eq!( + tuple3_tenure1.get("miner-spend-winner"), + tuple0.get("miner-spend-winner") + ); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; + // There should have been a block change, so these should be different. + assert_ne!(tuple3_block1, tuple3_block2); + assert_eq!( + tuple3_block2.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block2.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block2 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, &miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; + 
assert_eq!(tuple3_tenure1, tuple3_tenure1a); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block3 = result3_block.expect_tuple().unwrap().data_map; + // There should have been a block change, so these should be different. + assert_ne!(tuple3_block3, tuple3_block2); + assert_eq!( + tuple3_block3.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block3.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block3 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +fn get_expected_reward_for_height(blocks: &Vec, block_height: u128) -> u128 { + // Find the target block + let target_block = blocks + .iter() + .find(|b| b["block_height"].as_u64().unwrap() == block_height as u64) + .unwrap(); + + // Find the tenure change block (the first block with this burn block hash) + let tenure_burn_block_hash = target_block["burn_block_hash"].as_str().unwrap(); + let tenure_block = blocks + .iter() + .find(|b| b["burn_block_hash"].as_str().unwrap() == tenure_burn_block_hash) + .unwrap(); + let matured_block_hash = tenure_block["block_hash"].as_str().unwrap(); + + let mut expected_reward_opt = None; + for block in blocks.iter().rev() { + for rewards in block["matured_miner_rewards"].as_array().unwrap() { + if rewards.as_object().unwrap()["from_stacks_block_hash"] + .as_str() + .unwrap() + == matured_block_hash + { + let reward_object = rewards.as_object().unwrap(); + let coinbase_amount: u128 = reward_object["coinbase_amount"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_anchored: u128 = reward_object["tx_fees_anchored"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_streamed_confirmed: u128 = reward_object["tx_fees_streamed_confirmed"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_streamed_produced: u128 = reward_object["tx_fees_streamed_produced"] + .as_str() + .unwrap() + .parse() + .unwrap(); + expected_reward_opt = Some( + expected_reward_opt.unwrap_or(0) + + coinbase_amount + + tx_fees_anchored + + tx_fees_streamed_confirmed + + tx_fees_streamed_produced, + ); + } + } + + if let Some(expected_reward) = expected_reward_opt { + return expected_reward; + } + } + panic!("Expected reward not found"); +} + +#[test] +#[ignore] +/// Verify `block-reward` property in `get-block-info?` and `get-tenure-info?`. +/// This test is separated from `check_block_info` above because it needs to +/// mine 100+ blocks to mature the block reward, so it is slow. 
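+/// The expected reward is reconstructed from the `matured_miner_rewards` event
+/// payloads (see `get_expected_reward_for_height` above): the coinbase plus the
+/// anchored, streamed-confirmed, and streamed-produced transaction fees.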
+fn check_block_info_rewards() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * 2, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = "(define-read-only (get-info (height uint)) + { + burnchain-header-hash: (get-block-info? burnchain-header-hash height), + id-header-hash: (get-block-info? id-header-hash height), + header-hash: (get-block-info? header-hash height), + miner-address: (get-block-info? miner-address height), + time: (get-block-info? time height), + vrf-seed: (get-block-info? vrf-seed height), + block-reward: (get-block-info? block-reward height), + miner-spend-total: (get-block-info? miner-spend-total height), + miner-spend-winner: (get-block-info? 
miner-spend-winner height), + } + )"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + info!("Info from pre-epoch 3.0: {:?}", tuple0); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // This version uses the Clarity 1 / 2 function + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 functions + let contract3_name = "test-contract-3"; + let contract_clarity3 = "(define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? miner-spend-winner height), + } + )"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info 
= get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + let last_nakamoto_block = last_stacks_block_height; + + // Mine more than 2 burn blocks to get the last block's reward matured + // (only 2 blocks maturation time in tests) + info!("Mining 6 tenures to mature the block reward"); + for i in 0..6 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 20, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + info!("Mined a block ({i})"); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + let blocks = test_observer::get_blocks(); + + // Check the block reward is now matured in one of the tenure-change blocks + let mature_height = last_stacks_block_height - 4; + let expected_reward = get_expected_reward_for_height(&blocks, mature_height); + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(mature_height)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_eq!( + tuple0 + .get("block-reward") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + Value::UInt(expected_reward as u128) + ); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(mature_height)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(mature_height)], + ); + let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure.get("block-reward"), + tuple0.get("block-reward") + ); + + // Check the block reward is now matured in one of the Nakamoto blocks + let expected_reward = get_expected_reward_for_height(&blocks, last_nakamoto_block); + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_eq!( + tuple0 + .get("block-reward") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + Value::UInt(expected_reward as u128) + ); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure.get("block-reward"), + tuple0.get("block-reward") + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +/// Test Nakamoto mock miner by booting a follower node +#[test] +#[ignore] +fn mock_mining() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = 
Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 3; + let inter_blocks_per_tenure = 3; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let localhost = "127.0.0.1"; + naka_conf.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + naka_conf.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + wait_for_first_naka_block_commit(60, &commits_submitted); + + let mut follower_conf = naka_conf.clone(); + follower_conf.node.mock_mining = true; + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + follower_conf.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + follower_conf.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + let Counters { + naka_mined_blocks: follower_naka_mined_blocks, + .. 
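+        // Only the follower's mined-block counter is needed here; it tracks how
+        // many blocks the follower mock-mines.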
+ } = follower_run_loop.counters(); + + let mock_mining_blocks_start = follower_naka_mined_blocks.load(Ordering::SeqCst); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + let follower_naka_mined_blocks_before = follower_naka_mined_blocks.load(Ordering::SeqCst); + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let mock_miner_timeout = Instant::now(); + while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before + { + if mock_miner_timeout.elapsed() >= Duration::from_secs(60) { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ); + } + thread::sleep(Duration::from_millis(100)); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + let expected_blocks_mined = (inter_blocks_per_tenure + 1) * tenure_count; + let expected_tip_height = block_height_pre_3_0 + expected_blocks_mined; + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, expected_tip_height, + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + // Check follower's mock miner + let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); + let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; + assert_eq!( + blocks_mock_mined, tenure_count, + "Should have mock 
mined `tenure_count` nakamoto blocks" + ); + + // wait for follower to reach the chain tip + loop { + sleep_ms(1000); + let follower_node_info = get_chain_info(&follower_conf); + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash() + { + break; + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2dce43b661d..ac6a3ea978c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8,10 +8,12 @@ use std::{cmp, env, fs, io, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; use rand::{Rng, RngCore}; -use rusqlite::types::ToSql; +use rusqlite::params; +use serde::Deserialize; use serde_json::json; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::bitcoin::BitcoinNetworkType; @@ -36,6 +38,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; +use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -63,7 +66,6 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::address::AddressHashMode; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; @@ -149,7 +151,7 @@ fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAdd burnchain.peer_host = Some("127.0.0.1".to_string()); } - let magic_bytes = Config::from_config_file(cfile) + let magic_bytes = Config::from_config_file(cfile, false) .unwrap() .burnchain .magic_bytes; @@ -184,7 +186,9 @@ pub fn neon_integration_test_conf_with_seed(seed: Vec) -> (Config, StacksAdd } pub mod test_observer { + use std::collections::HashSet; use std::convert::Infallible; + use std::ops::{Bound, RangeBounds}; use std::sync::Mutex; use std::thread; @@ -564,6 +568,38 @@ pub mod test_observer { ATTACHMENTS.lock().unwrap().clear(); PROPOSAL_RESPONSES.lock().unwrap().clear(); } + + pub fn contains_burn_block_range(range: impl RangeBounds) -> Result<(), String> { + // Get set of all burn block heights + let burn_block_heights = get_blocks() + .iter() + .map(|x| x.get("burn_block_height").unwrap().as_u64().unwrap()) + .collect::>(); + + let start = match range.start_bound() { + Bound::Unbounded => return Err("Unbounded ranges not supported".into()), + Bound::Included(&x) => x, + 
Bound::Excluded(&x) => x.saturating_add(1), + }; + + let end = match range.end_bound() { + Bound::Unbounded => return Err("Unbounded ranges not supported".into()), + Bound::Included(&x) => x, + Bound::Excluded(&x) => x.saturating_sub(1), + }; + + // Find indexes in range for which we don't have burn block in set + let missing = (start..=end) + .into_iter() + .filter(|i| !burn_block_heights.contains(&i)) + .collect::>(); + + if missing.is_empty() { + Ok(()) + } else { + Err(format!("Missing the following burn blocks: {missing:?}")) + } + } } const PANIC_TIMEOUT_SECS: u64 = 600; @@ -825,7 +861,7 @@ pub fn get_chain_info_result(conf: &Config) -> Result() + client.get(&path).send()?.json::() } pub fn get_chain_info_opt(conf: &Config) -> Option { @@ -856,6 +892,55 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { (stacks_tip_consensus_hash, block) } +#[derive(Deserialize, Debug)] +struct ReadOnlyResponse { + #[serde(rename = "okay")] + _okay: bool, + #[serde(rename = "result")] + result_hex: String, +} + +impl ReadOnlyResponse { + pub fn result(&self) -> Result { + Value::try_deserialize_hex_untyped(&self.result_hex) + } +} + +pub fn call_read_only( + conf: &Config, + principal: &StacksAddress, + contract: &str, + function: &str, + args: Vec<&Value>, +) -> Value { + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let client = reqwest::blocking::Client::new(); + + let path = format!( + "{http_origin}/v2/contracts/call-read/{}/{}/{}", + principal, contract, function + ); + + let serialized_args = args + .iter() + .map(|arg| arg.serialize_to_hex().unwrap()) + .collect::>(); + + let body = json!({ + "arguments": serialized_args, + "sender": principal.to_string(), + }); + let response: ReadOnlyResponse = client + .post(path) + .header("Content-Type", "application/json") + .body(body.to_string()) + .send() + .unwrap() + .json() + .unwrap(); + response.result().unwrap() +} + fn find_microblock_privkey( conf: &Config, pubkey_hash: &Hash160, @@ -1214,21 +1299,30 @@ pub struct Account { pub nonce: u64, } -pub fn get_account(http_origin: &str, account: &F) -> Account { +pub fn get_account_result( + http_origin: &str, + account: &F, +) -> Result { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account); - let res = client - .get(&path) - .send() - .unwrap() - .json::() - .unwrap(); + let res = client.get(&path).send()?.json::()?; info!("Account response: {:#?}", res); - Account { + Ok(Account { balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(), locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(), nonce: res.nonce, - } + }) +} + +pub fn get_account(http_origin: &str, account: &F) -> Account { + get_account_result(http_origin, account).unwrap() +} + +pub fn get_neighbors(conf: &Config) -> Option { + let client = reqwest::blocking::Client::new(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let path = format!("{}/v2/neighbors", http_origin); + client.get(&path).send().ok()?.json().ok() } pub fn get_pox_info(http_origin: &str) -> Option { @@ -3418,12 +3512,22 @@ fn microblock_fork_poison_integration_test() { .unwrap(); chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), + tip_hash, + ) + .unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, 
&tip_hash)
+        .unwrap();
     let first_microblock = make_microblock(
         &privk,
         &mut chainstate,
-        &btc_regtest_controller.sortdb_ref().index_conn(),
+        &iconn,
         consensus_hash,
         stacks_block.clone(),
         vec![unconfirmed_tx],
@@ -3673,12 +3777,22 @@ fn microblock_integration_test() {
         .unwrap();
     chainstate
-        .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash)
+        .reload_unconfirmed_state(
+            &btc_regtest_controller
+                .sortdb_ref()
+                .index_handle_at_block(&chainstate, &tip_hash)
+                .unwrap(),
+            tip_hash,
+        )
+        .unwrap();
+    let iconn = btc_regtest_controller
+        .sortdb_ref()
+        .index_handle_at_block(&chainstate, &tip_hash)
+        .unwrap();
     let first_microblock = make_microblock(
         &privk,
         &mut chainstate,
-        &btc_regtest_controller.sortdb_ref().index_conn(),
+        &iconn,
         consensus_hash,
         stacks_block.clone(),
         vec![unconfirmed_tx],
@@ -8694,7 +8808,7 @@ fn atlas_stress_integration_test() {
     let mut hashes = query_row_columns::<Hash160, _>(
         &atlasdb.conn,
         "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2",
-        &[ibh as &dyn ToSql, &u64_to_sql(*index).unwrap() as &dyn ToSql],
+        params![ibh, u64_to_sql(*index).unwrap()],
        "content_hash")
        .unwrap();
     if hashes.len() > 0 {
@@ -9089,7 +9203,13 @@ fn use_latest_tip_integration_test() {
     // Initialize the unconfirmed state.
     chainstate
-        .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash)
+        .reload_unconfirmed_state(
+            &btc_regtest_controller
+                .sortdb_ref()
+                .index_handle_at_block(&chainstate, &tip_hash)
+                .unwrap(),
+            tip_hash,
+        )
         .unwrap();

     // Make microblock with two transactions.
@@ -9109,10 +9229,14 @@ fn use_latest_tip_integration_test() {
     let vec_tx = vec![tx_1, tx_2];
     let privk =
         find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap();
+    let iconn = btc_regtest_controller
+        .sortdb_ref()
+        .index_handle_at_block(&chainstate, &tip_hash)
+        .unwrap();
     let mblock = make_microblock(
         &privk,
         &mut chainstate,
-        &btc_regtest_controller.sortdb_ref().index_conn(),
+        &iconn,
         consensus_hash,
         stacks_block.clone(),
         vec_tx,
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
new file mode 100644
index 00000000000..fe7bf771041
--- /dev/null
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -0,0 +1,785 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+mod v0;
+mod v1;
+
+use std::collections::HashSet;
+use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
+use std::sync::{Arc, Mutex};
+use std::thread;
+use std::time::{Duration, Instant};
+
+use clarity::boot_util::boot_code_id;
+use clarity::vm::types::PrincipalData;
+use libsigner::{SignerEntries, SignerEventTrait};
+use stacks::chainstate::coordinator::comm::CoordinatorChannels;
+use stacks::chainstate::nakamoto::signer_set::NakamotoSigners;
+use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME};
+use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature};
+use stacks::core::StacksEpoch;
+use stacks::net::api::postblock_proposal::BlockValidateResponse;
+use stacks::types::chainstate::StacksAddress;
+use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey};
+use stacks_common::codec::StacksMessageCodec;
+use stacks_common::consts::SIGNER_SLOTS_PER_USER;
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum};
+use stacks_signer::client::{ClientError, SignerSlotID, StacksClient};
+use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network};
+use stacks_signer::runloop::{SignerResult, State, StateInfo};
+use stacks_signer::{Signer, SpawnedSigner};
+use wsts::state_machine::PublicKeys;
+
+use super::nakamoto_integrations::wait_for;
+use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance};
+use crate::event_dispatcher::MinedNakamotoBlockEvent;
+use crate::neon::{Counters, TestFlag};
+use crate::run_loop::boot_nakamoto;
+use crate::tests::bitcoin_regtest::BitcoinCoreController;
+use crate::tests::nakamoto_integrations::{
+    naka_neon_integration_conf, next_block_and_mine_commit, next_block_and_wait_for_commits,
+    POX_4_DEFAULT_STACKER_BALANCE,
+};
+use crate::tests::neon_integrations::{
+    get_chain_info, next_block_and_wait, run_until_burnchain_height, test_observer,
+    wait_for_runloop,
+};
+use crate::tests::to_addr;
+use crate::{BitcoinRegtestController, BurnchainController};
+
+// Helper struct for holding the btc and stx neon nodes
+#[allow(dead_code)]
+pub struct RunningNodes {
+    pub btc_regtest_controller: BitcoinRegtestController,
+    pub btcd_controller: BitcoinCoreController,
+    pub run_loop_thread: thread::JoinHandle<()>,
+    pub run_loop_stopper: Arc<AtomicBool>,
+    pub vrfs_submitted: Arc<AtomicU64>,
+    pub commits_submitted: Arc<AtomicU64>,
+    pub blocks_processed: Arc<AtomicU64>,
+    pub nakamoto_blocks_proposed: Arc<AtomicU64>,
+    pub nakamoto_blocks_mined: Arc<AtomicU64>,
+    pub nakamoto_test_skip_commit_op: TestFlag,
+    pub coord_channel: Arc<Mutex<CoordinatorChannels>>,
+    pub conf: NeonConfig,
+}
+
+/// A test harness for running a v0 or v1 signer integration test
+pub struct SignerTest<S> {
+    // The stx and bitcoin nodes and their run loops
+    pub running_nodes: RunningNodes,
+    // The spawned signers and their threads
+    pub spawned_signers: Vec<S>,
+    // The config for each spawned signer
+    pub signer_configs: Vec<SignerConfig>,
+    // the private keys of the signers
+    pub signer_stacks_private_keys: Vec<StacksPrivateKey>,
+    // link to the stacks node
+    pub stacks_client: StacksClient,
+    // Unique number used to isolate files created during the test
+    pub run_stamp: u16,
+    /// The number of cycles to stack for
+    pub num_stacking_cycles:
u64, +} + +impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { + fn new( + num_signers: usize, + initial_balances: Vec<(StacksAddress, u64)>, + wait_on_signers: Option, + ) -> Self { + Self::new_with_config_modifications( + num_signers, + initial_balances, + wait_on_signers, + |_| {}, + |_| {}, + &[], + ) + } + + fn new_with_config_modifications< + F: FnMut(&mut SignerConfig) -> (), + G: FnMut(&mut NeonConfig) -> (), + >( + num_signers: usize, + initial_balances: Vec<(StacksAddress, u64)>, + wait_on_signers: Option, + mut signer_config_modifier: F, + mut node_config_modifier: G, + btc_miner_pubkeys: &[Secp256k1PublicKey], + ) -> Self { + // Generate Signer Data + let signer_stacks_private_keys = (0..num_signers) + .map(|_| StacksPrivateKey::new()) + .collect::>(); + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + + node_config_modifier(&mut naka_conf); + + // Add initial balances to the config + for (address, amount) in initial_balances.iter() { + naka_conf + .add_initial_balance(PrincipalData::from(address.clone()).to_string(), *amount); + } + + // So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life! + // That's the kind of thing an idiot would have on his luggage! + let password = "12345"; + naka_conf.connection_options.block_proposal_token = Some(password.to_string()); + if let Some(wait_on_signers) = wait_on_signers { + naka_conf.miner.wait_on_signers = wait_on_signers; + } else { + naka_conf.miner.wait_on_signers = Duration::from_secs(10); + } + let run_stamp = rand::random(); + + // Setup the signer and coordinator configurations + let signer_configs: Vec<_> = build_signer_config_tomls( + &signer_stacks_private_keys, + &naka_conf.node.rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
+ &Network::Testnet, + password, + run_stamp, + 3000, + Some(100_000), + None, + Some(9000), + ) + .into_iter() + .map(|toml| { + let mut signer_config = SignerConfig::load_from_str(&toml).unwrap(); + signer_config_modifier(&mut signer_config); + signer_config + }) + .collect(); + assert_eq!(signer_configs.len(), num_signers); + + let spawned_signers = signer_configs + .iter() + .cloned() + .map(SpawnedSigner::new) + .collect(); + + // Setup the nodes and deploy the contract to it + let btc_miner_pubkeys = if btc_miner_pubkeys.is_empty() { + let pk = Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(); + vec![pk] + } else { + btc_miner_pubkeys.to_vec() + }; + let node = setup_stx_btc_node( + naka_conf, + &signer_stacks_private_keys, + &signer_configs, + btc_miner_pubkeys.as_slice(), + node_config_modifier, + ); + let config = signer_configs.first().unwrap(); + let stacks_client = StacksClient::from(config); + + Self { + running_nodes: node, + spawned_signers, + signer_stacks_private_keys, + stacks_client, + run_stamp, + num_stacking_cycles: 12_u64, + signer_configs, + } + } + + /// Send a status request to each spawned signer + pub fn send_status_request(&self, exclude: &HashSet) { + for signer_ix in 0..self.spawned_signers.len() { + if exclude.contains(&signer_ix) { + continue; + } + let port = 3000 + signer_ix; + let endpoint = format!("http://localhost:{}", port); + let path = format!("{endpoint}/status"); + let client = reqwest::blocking::Client::new(); + let response = client + .get(path) + .send() + .expect("Failed to send status request"); + assert!(response.status().is_success()) + } + } + + pub fn wait_for_registered(&mut self, timeout_secs: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + if state.runloop_state == State::RegisteredSigners { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a registered signers state from them.", state.runloop_state); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + pub fn wait_for_cycle(&mut self, timeout_secs: u64, reward_cycle: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + let Some(reward_cycle_info) = state.reward_cycle_info else { continue; }; + if reward_cycle_info.reward_cycle == reward_cycle { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a cycle = {} state from them.", state, reward_cycle); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + /// Get status check results (if returned) from each signer without blocking + /// Returns Some() or None() for each signer, in order of `self.spawned_signers` + pub fn get_states(&mut self, exclude: &HashSet) -> Vec> { + let mut 
output = Vec::new();
+        for (ix, signer) in self.spawned_signers.iter().enumerate() {
+            if exclude.contains(&ix) {
+                output.push(None);
+                continue;
+            }
+            let Ok(mut results) = signer.res_recv.try_recv() else {
+                debug!("Could not receive latest state from signer #{ix}");
+                output.push(None);
+                continue;
+            };
+            if results.len() > 1 {
+                warn!("Received multiple states from the signer receiver: this test function assumes it should only ever receive 1");
+                panic!();
+            }
+            let Some(result) = results.pop() else {
+                debug!("Could not receive latest state from signer #{ix}");
+                output.push(None);
+                continue;
+            };
+            match result {
+                SignerResult::OperationResult(_operation) => {
+                    panic!("Received an operation result.");
+                }
+                SignerResult::StatusCheck(state_info) => {
+                    output.push(Some(state_info));
+                }
+            }
+        }
+        output
+    }
+
+    fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 {
+        let prepare_phase_len = self
+            .running_nodes
+            .conf
+            .get_burnchain()
+            .pox_constants
+            .prepare_length as u64;
+        let current_block_height = self
+            .running_nodes
+            .btc_regtest_controller
+            .get_headers_height()
+            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
+        let curr_reward_cycle = self.get_current_reward_cycle();
+        let next_reward_cycle = curr_reward_cycle.saturating_add(1);
+        let next_reward_cycle_height = self
+            .running_nodes
+            .btc_regtest_controller
+            .get_burnchain()
+            .reward_cycle_to_block_height(next_reward_cycle);
+        let next_reward_cycle_reward_set_calculation = next_reward_cycle_height
+            .saturating_sub(prepare_phase_len)
+            .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase
+
+        next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height)
+    }
+
+    fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 {
+        let current_block_height = self
+            .running_nodes
+            .btc_regtest_controller
+            .get_headers_height()
+            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
+        let reward_cycle_height = self
+            .running_nodes
+            .btc_regtest_controller
+            .get_burnchain()
+            .reward_cycle_to_block_height(reward_cycle);
+        reward_cycle_height.saturating_sub(current_block_height)
+    }
+
+    fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent {
+        let commits_submitted = self.running_nodes.commits_submitted.clone();
+        let mined_block_time = Instant::now();
+        next_block_and_mine_commit(
+            &mut self.running_nodes.btc_regtest_controller,
+            timeout.as_secs(),
+            &self.running_nodes.coord_channel,
+            &commits_submitted,
+        )
+        .unwrap();
+
+        let t_start = Instant::now();
+        while test_observer::get_mined_nakamoto_blocks().is_empty() {
+            assert!(
+                t_start.elapsed() < timeout,
+                "Timed out while waiting for mined nakamoto block event"
+            );
+            thread::sleep(Duration::from_secs(1));
+        }
+        let mined_block_elapsed_time = mined_block_time.elapsed();
+        info!(
+            "Nakamoto block mine time elapsed: {:?}",
+            mined_block_elapsed_time
+        );
+        test_observer::get_mined_nakamoto_blocks().pop().unwrap()
+    }
+
+    fn mine_block_wait_on_processing(
+        &mut self,
+        coord_channels: &[&Arc<Mutex<CoordinatorChannels>>],
+        commits_submitted: &[&Arc<AtomicU64>],
+        timeout: Duration,
+    ) {
+        let blocks_len = test_observer::get_blocks().len();
+        let mined_block_time = Instant::now();
+        next_block_and_wait_for_commits(
+            &mut self.running_nodes.btc_regtest_controller,
+            timeout.as_secs(),
+            coord_channels,
+            commits_submitted,
+        )
+        .unwrap();
+        let t_start = Instant::now();
+        while
test_observer::get_blocks().len() <= blocks_len { + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for nakamoto block to be processed" + ); + thread::sleep(Duration::from_secs(1)); + } + let mined_block_elapsed_time = mined_block_time.elapsed(); + info!( + "Nakamoto block mine time elapsed: {:?}", + mined_block_elapsed_time + ); + } + + fn wait_for_confirmed_block_v1( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> ThresholdSignature { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + let signer_signature_hex = block_obj.get("signer_signature").unwrap().as_str().unwrap(); + let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); + let signer_signature = + ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) + .unwrap(); + signer_signature + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_v0( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> Vec { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + block_obj + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to deserialize array of MessageSignature") + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_with_hash( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> serde_json::Map { + let t_start = Instant::now(); + while t_start.elapsed() <= timeout { + let blocks = test_observer::get_blocks(); + if let Some(block) = blocks.iter().find_map(|block_json| { + let block_obj = block_json.as_object().unwrap(); + let sighash = block_obj + // use the try operator because non-nakamoto blocks + // do not supply this field + .get("signer_signature_hash")? 
+ .as_str() + .unwrap(); + if sighash != &format!("0x{block_signer_sighash}") { + return None; + } + Some(block_obj.clone()) + }) { + return block; + } + thread::sleep(Duration::from_millis(500)); + } + panic!("Timed out while waiting for confirmation of block with signer sighash = {block_signer_sighash}") + } + + fn wait_for_block_validate_response(&mut self, timeout: Duration) -> BlockValidateResponse { + // Wait for the block to show up in the test observer + let t_start = Instant::now(); + while test_observer::get_proposal_responses().is_empty() { + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for block proposal response event" + ); + thread::sleep(Duration::from_secs(1)); + } + test_observer::get_proposal_responses() + .pop() + .expect("No block proposal") + } + + fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + let validate_response = self.wait_for_block_validate_response(timeout); + match validate_response { + BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, + _ => panic!("Unexpected response"), + } + } + + fn wait_for_validate_reject_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + // Wait for the block to show up in the test observer + let validate_response = self.wait_for_block_validate_response(timeout); + match validate_response { + BlockValidateResponse::Reject(block_rejection) => block_rejection.signer_signature_hash, + _ => panic!("Unexpected response"), + } + } + + // Must be called AFTER booting the chainstate + fn run_until_epoch_3_boundary(&mut self) { + let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = + &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + + let epoch_30_boundary = epoch_3.start_height - 1; + // advance to epoch 3.0 and trigger a sign round (cannot vote on blocks in pre epoch 3.0) + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + epoch_30_boundary, + &self.running_nodes.conf, + ); + info!("Advanced to Nakamoto epoch 3.0 boundary {epoch_30_boundary}! 
Ready to Sign Blocks!"); + } + + fn get_current_reward_cycle(&self) -> u64 { + let block_height = get_chain_info(&self.running_nodes.conf).burn_block_height; + let rc = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + info!("Get current reward cycle: block_height = {block_height}, rc = {rc}"); + rc + } + + fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID { + let valid_signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); + + self.stacks_client + .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + .expect("FATAL: failed to get signer slots from stackerdb") + .iter() + .position(|(address, _)| address == self.stacks_client.get_signer_address()) + .map(|pos| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + }) + .expect("FATAL: signer not registered") + } + + fn get_signer_slots( + &self, + reward_cycle: u64, + ) -> Result, ClientError> { + let valid_signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); + + self.stacks_client + .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + } + + fn get_signer_indices(&self, reward_cycle: u64) -> Vec { + self.get_signer_slots(reward_cycle) + .expect("FATAL: failed to get signer slots from stackerdb") + .iter() + .enumerate() + .map(|(pos, _)| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + }) + .collect::>() + } + + /// Get the wsts public keys for the given reward cycle + fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + let entries = self.get_reward_set_signers(reward_cycle); + let entries = SignerEntries::parse(false, &entries).unwrap(); + entries.public_keys + } + + /// Get the signers for the given reward cycle + pub fn get_reward_set_signers(&self, reward_cycle: u64) -> Vec { + self.stacks_client + .get_reward_set_signers(reward_cycle) + .unwrap() + .unwrap() + } + + #[allow(dead_code)] + fn get_signer_metrics(&self) -> String { + #[cfg(feature = "monitoring_prom")] + { + let client = reqwest::blocking::Client::new(); + let res = client + .get("http://localhost:9000/metrics") + .send() + .unwrap() + .text() + .unwrap(); + + return res; + } + #[cfg(not(feature = "monitoring_prom"))] + return String::new(); + } + + /// Kills the signer runloop at index `signer_idx` + /// and returns the private key of the killed signer. + /// + /// # Panics + /// Panics if `signer_idx` is out of bounds + pub fn stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey { + let spawned_signer = self.spawned_signers.remove(signer_idx); + let signer_key = self.signer_stacks_private_keys.remove(signer_idx); + + spawned_signer.stop(); + signer_key + } + + /// (Re)starts a new signer runloop with the given private key + pub fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { + let signer_config = build_signer_config_tomls( + &[signer_private_key], + &self.running_nodes.conf.node.rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "12345", // It worked sir, we have the combination! -Great, what's the combination? 
+            self.run_stamp,
+            3000 + signer_idx,
+            Some(100_000),
+            None,
+            Some(9000 + signer_idx),
+        )
+        .pop()
+        .unwrap();
+
+        info!("Restarting signer");
+        let config = SignerConfig::load_from_str(&signer_config).unwrap();
+        let signer = SpawnedSigner::new(config);
+        self.spawned_signers.insert(signer_idx, signer);
+    }
+
+    pub fn shutdown(self) {
+        self.running_nodes
+            .coord_channel
+            .lock()
+            .expect("Mutex poisoned")
+            .stop_chains_coordinator();
+
+        self.running_nodes
+            .run_loop_stopper
+            .store(false, Ordering::SeqCst);
+        self.running_nodes.run_loop_thread.join().unwrap();
+        for signer in self.spawned_signers {
+            assert!(signer.stop().is_none());
+        }
+    }
+}
+
+fn setup_stx_btc_node<G: FnMut(&mut NeonConfig) -> ()>(
+    mut naka_conf: NeonConfig,
+    signer_stacks_private_keys: &[StacksPrivateKey],
+    signer_configs: &[SignerConfig],
+    btc_miner_pubkeys: &[Secp256k1PublicKey],
+    mut node_config_modifier: G,
+) -> RunningNodes {
+    // Spawn the endpoints for observing signers
+    for signer_config in signer_configs {
+        naka_conf.events_observers.insert(EventObserverConfig {
+            endpoint: signer_config.endpoint.to_string(),
+            events_keys: vec![
+                EventKeyType::StackerDBChunks,
+                EventKeyType::BlockProposal,
+                EventKeyType::BurnchainBlocks,
+            ],
+        });
+    }
+
+    // Spawn a test observer for verification purposes
+    test_observer::spawn();
+    let observer_port = test_observer::EVENT_OBSERVER_PORT;
+    naka_conf.events_observers.insert(EventObserverConfig {
+        endpoint: format!("localhost:{observer_port}"),
+        events_keys: vec![
+            EventKeyType::StackerDBChunks,
+            EventKeyType::BlockProposal,
+            EventKeyType::MinedBlocks,
+            EventKeyType::BurnchainBlocks,
+        ],
+    });
+
+    // The signers need some initial balances in order to pay for epoch 2.5 transaction votes
+    let mut initial_balances = Vec::new();
+
+    // TODO: separate keys for stacking and signing (because they'll be different in prod)
+    for key in signer_stacks_private_keys {
+        initial_balances.push(InitialBalance {
+            address: to_addr(key).into(),
+            amount: POX_4_DEFAULT_STACKER_BALANCE,
+        });
+    }
+    naka_conf.initial_balances.append(&mut initial_balances);
+    naka_conf.node.stacker = true;
+    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5);
+
+    for signer_set in 0..2 {
+        for message_id in 0..SIGNER_SLOTS_PER_USER {
+            let contract_id =
+                NakamotoSigners::make_signers_db_contract_id(signer_set, message_id, false);
+            if !naka_conf.node.stacker_dbs.contains(&contract_id) {
+                debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. Forcibly subscribing...");
+                naka_conf.node.stacker_dbs.push(contract_id);
+            }
+        }
+    }
+    node_config_modifier(&mut naka_conf);
+
+    info!("Make new BitcoinCoreController");
+    let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .map_err(|_e| ())
+        .expect("Failed starting bitcoind");
+
+    info!("Make new BitcoinRegtestController");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
+
+    info!("Bootstrapping...");
+    // Should be 201 for other tests?
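+    // Mine 195 burnchain blocks whose coinbases pay to the supplied btc miner
+    // pubkeys, so each miner controls spendable outputs before the test begins.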
+ btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + + info!("Chain bootstrapped..."); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: naka_blocks_proposed, + naka_mined_blocks: naka_blocks_mined, + naka_skip_commit_op: nakamoto_test_skip_commit_op, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + // Give the run loop some time to start up! + info!("Wait for runloop..."); + wait_for_runloop(&blocks_processed); + + // First block wakes up the run loop. + info!("Mine first block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Second block will hold our VRF registration. + info!("Mine second block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Third block will be the first mined Stacks block. + info!("Mine third block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + RunningNodes { + btcd_controller, + btc_regtest_controller, + run_loop_thread, + run_loop_stopper, + vrfs_submitted: vrfs_submitted.0, + commits_submitted: commits_submitted.0, + blocks_processed: blocks_processed.0, + nakamoto_blocks_proposed: naka_blocks_proposed.0, + nakamoto_blocks_mined: naka_blocks_mined.0, + nakamoto_test_skip_commit_op, + coord_channel, + conf: naka_conf, + } +} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs new file mode 100644 index 00000000000..f589416746c --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -0,0 +1,2645 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
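+//! Integration tests for the v0 signer, exercised against a bitcoin regtest
+//! chain and a Nakamoto-mode stacks-node.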
+ +use std::collections::{HashMap, HashSet}; +use std::ops::Add; +use std::str::FromStr; +use std::sync::atomic::Ordering; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use clarity::vm::types::PrincipalData; +use clarity::vm::StacksEpoch; +use libsigner::v0::messages::{ + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, +}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use rand::RngCore; +use stacks::address::AddressHashMode; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::MINERS_NAME; +use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::codec::StacksMessageCodec; +use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; +use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; +use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks::util_lib::boot::boot_code_id; +use stacks::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; +use stacks_common::bitvec::BitVec; +use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; +use stacks_signer::client::{SignerSlotID, StackerDB}; +use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; +use stacks_signer::runloop::State; +use stacks_signer::v0::SpawnedSigner; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{fmt, EnvFilter}; + +use super::SignerTest; +use crate::config::{EventKeyType, EventObserverConfig}; +use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::neon::Counters; +use crate::run_loop::boot_nakamoto; +use crate::tests::nakamoto_integrations::{ + boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, +}; +use crate::tests::neon_integrations::{ + get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, + test_observer, +}; +use crate::tests::{self, make_stacks_transfer}; +use crate::{nakamoto_node, BurnchainController, Config, Keychain}; + +impl SignerTest<SpawnedSigner> { + /// Run the test until the first epoch 2.5 reward cycle. + /// Will activate pox-4 and register signers for the first full Epoch 2.5 reward cycle.
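+ /// In outline (a summary inferred from the body below): each signer key submits a + /// pox-4 `stack-stx` contract call carrying a `Pox4SignatureTopic::StackStx` signer-key + /// signature over its PoX address, then burn blocks are mined until the next cycle's + /// reward set is calculated and the signers report themselves registered.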
+ fn boot_to_epoch_25_reward_cycle(&mut self) { + boot_to_epoch_25( + &self.running_nodes.conf, + &self.running_nodes.blocks_processed, + &mut self.running_nodes.btc_regtest_controller, + ); + + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let lock_period = 12; + + let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = + &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_25_start_height = epoch_25.start_height; + // stack enough to activate pox-4 + let block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + for stacker_sk in self.signer_stacks_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + lock_period, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(lock_period), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let reward_cycle_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .reward_cycle_length as u64; + let prepare_phase_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .prepare_length as u64; + + let epoch_25_reward_cycle_boundary = + epoch_25_start_height.saturating_sub(epoch_25_start_height % reward_cycle_len); + let epoch_25_reward_set_calculation_boundary = epoch_25_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .wrapping_add(reward_cycle_len) + .wrapping_add(1); + + let next_reward_cycle_boundary = + epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + epoch_25_reward_set_calculation_boundary, + &self.running_nodes.conf, + ); + debug!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = 
self.get_current_reward_cycle().wrapping_add(1); + while !reward_set_calculated { + let reward_set = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + debug!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + debug!("Signer set calculated"); + // Manually consume one more block to ensure signers refresh their state + debug!("Waiting for signers to initialize."); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + self.wait_for_registered(30); + debug!("Signers initialized"); + + info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + next_reward_cycle_boundary, + &self.running_nodes.conf, + ); + + let current_burn_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + info!("At burn block height {current_burn_block_height}. Ready to mine the first Epoch 2.5 reward cycle!"); + } + + /// Run the test until the epoch 3 boundary + fn boot_to_epoch_3(&mut self) { + boot_to_epoch_3_reward_set( + &self.running_nodes.conf, + &self.running_nodes.blocks_processed, + &self.signer_stacks_private_keys, + &self.signer_stacks_private_keys, + &mut self.running_nodes.btc_regtest_controller, + Some(self.num_stacking_cycles), + ); + info!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = self.get_current_reward_cycle() + 1; + while !reward_set_calculated { + let reward_set = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + debug!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + info!("Signer set calculated"); + + // Manually consume one more block to ensure signers refresh their state + info!("Waiting for signers to initialize."); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + self.wait_for_registered(30); + info!("Signers initialized"); + + self.run_until_epoch_3_boundary(); + + let commits_submitted = self.running_nodes.commits_submitted.clone(); + + info!("Waiting 1 burnchain block for miner VRF key confirmation"); + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); + } + + // Only call after already past the epoch 3.0 boundary + fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + info!("------------------------- Try mining 
one block -------------------------"); + + self.mine_nakamoto_block(timeout); + + // Verify that the signers accepted the proposed block, sending back a validate ok response + let proposed_signer_signature_hash = self.wait_for_validate_ok_response(timeout); + let message = proposed_signer_signature_hash.0; + + info!("------------------------- Test Block Signed -------------------------"); + // Verify that the signers signed the proposed block + let signature = self.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + + info!("Got {} signatures", signature.len()); + + // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block + // whenever it has crossed the threshold. + assert!(signature.len() >= num_signers * 7 / 10); + + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + + // Verify that the signers signed the proposed block + let mut signer_index = 0; + let mut signature_index = 0; + let validated = loop { + // Since we've already checked `signature.len()`, this means we've + // validated all the signatures in this loop + let Some(signature) = signature.get(signature_index) else { + break true; + }; + let Some(signer) = signers.get(signer_index) else { + error!("Failed to validate the mined nakamoto block: ran out of signers to try to validate signatures"); + break false; + }; + let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let valid = stacks_public_key + .verify(&message, signature) + .expect("Failed to verify signature"); + if !valid { + info!( + "Failed to verify signature for signer, will attempt to validate without this signer"; + "signer_pk" => stacks_public_key.to_hex(), + "signer_index" => signer_index, + "signature_index" => signature_index, + ); + signer_index += 1; + } else { + signer_index += 1; + signature_index += 1; + } + }; + + assert!(validated); + } + + // Only call after already past the epoch 3.0 boundary + fn run_until_burnchain_height_nakamoto( + &mut self, + timeout: Duration, + burnchain_height: u64, + num_signers: usize, + ) { + let current_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); + debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); + for _ in 0..total_nmb_blocks_to_mine { + self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + } + } + + /// Propose an invalid block to the signers + fn propose_block(&mut self, slot_id: u32, version: u32, block: NakamotoBlock) { + let miners_contract_id = boot_code_id(MINERS_NAME, false); + let mut session = + StackerDBSession::new(&self.running_nodes.conf.node.rpc_bind, miners_contract_id); + let burn_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = self.get_current_reward_cycle(); + let message = SignerMessage::BlockProposal(BlockProposal { + block, + burn_height, + reward_cycle, + }); + let miner_sk = self + .running_nodes + .conf + .miner + .mining_key + .expect("No mining key"); + + // Submit the block proposal to the miner's slot + let mut chunk = StackerDBChunkData::new(slot_id, version, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = 
session.put_chunk(&chunk).expect("Failed to put chunk"); + debug!("Test Put Chunk ACK: {result:?}"); + assert!( + result.accepted, + "Failed to submit block proposal to signers" + ); + } +} + +#[test] +#[ignore] +/// Test that a signer can respond to an invalid block proposal +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block by first verifying it against the stacks node block proposal endpoint. +/// The signers then broadcast a rejection of the miner's proposed block back to the respective .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// Each signer successfully rejects the invalid block proposal. +fn block_proposal_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![], None); + signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); + + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let reward_cycle = signer_test.get_current_reward_cycle(); + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + }; + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + + // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE + // the block is submitted to the node for validation. + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(0, 1, block.clone()); + + // Propose a block to the signers that passes initial checks but will be rejected by the stacks node + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + + let block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(0, 2, block); + + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected the second block via the endpoint + let rejected_block_hash = signer_test.wait_for_validate_reject_response(short_timeout); + assert_eq!(rejected_block_hash, block_signer_signature_hash_2); + + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index.
+ ); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + let start_polling = Instant::now(); + let mut found_signer_signature_hash_1 = false; + let mut found_signer_signature_hash_2 = false; + // Poll until BOTH rejections have been observed + while !(found_signer_signature_hash_1 && found_signer_signature_hash_2) { + std::thread::sleep(Duration::from_secs(1)); + let messages: Vec<SignerMessage> = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + })) = message + { + if signer_signature_hash == block_signer_signature_hash_1 { + found_signer_signature_hash_1 = true; + assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + } else if signer_signature_hash == block_signer_signature_hash_2 { + found_signer_signature_hash_2 = true; + assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + } else { + panic!("Unexpected signer signature hash"); + } + } else { + panic!("Unexpected message type"); + } + } + assert!( + start_polling.elapsed() <= short_timeout, + "Timed out after waiting for response from signer" + ); + } + signer_test.shutdown(); +} + +// Basic test to ensure that miners are able to gather block responses +// from signers and create blocks. +#[test] +#[ignore] +fn miner_gather_signatures() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + // Disable p2p broadcast of the nakamoto blocks, so that we rely + // on the signers using StackerDB to get pushed blocks + *nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST + .lock() + .unwrap() = Some(true); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![], None); + signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. + let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + let expected_result = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers + ); + assert!(metrics_response.contains(&expected_result)); + } +} + +#[test] +#[ignore] +/// Test that signers can handle a transition between Nakamoto reward cycles +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+/// +/// Test Execution: +/// The node mines 2 full Nakamoto reward cycles, sending blocks to observing signers to sign and return. +/// +/// Test Assertion: +/// All signers sign all blocks successfully. +/// The chain advances 2 full reward cycles. +fn mine_2_nakamoto_reward_cycles() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let nmb_reward_cycles = 2; + let num_signers = 5; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![], None); + let timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + // Mine 2 full Nakamoto reward cycles (epoch 3 starts in the middle of one, hence the + 1) + let next_reward_cycle = curr_reward_cycle.saturating_add(1); + let final_reward_cycle = next_reward_cycle.saturating_add(nmb_reward_cycles); + let final_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(final_reward_cycle) + .saturating_sub(1); + + info!("------------------------- Test Mine 2 Nakamoto Reward Cycles -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + timeout, + final_reward_cycle_height_boundary, + num_signers, + ); + + let current_burnchain_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); + signer_test.shutdown(); +} + +#[test] +#[ignore] +fn forked_tenure_invalid() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + let result = forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false); + + assert_ne!(result.tip_b, result.tip_a); + assert_eq!(result.tip_b, result.tip_c); + assert_ne!(result.tip_c, result.tip_a); + + // Block B was built atop block A + assert_eq!( + result.tip_b.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_b.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + result.mined_c.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + assert_ne!( + result + .tip_c + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .signer_signature_hash(), + result.mined_c.signer_signature_hash, + "Mined block during tenure C should not have become the chain tip" + ); + + assert!(result.tip_c_2.is_none()); + assert!(result.mined_c_2.is_none()); + + // Tenure D should continue progress + assert_ne!(result.tip_c, result.tip_d); + assert_ne!(result.tip_b, result.tip_d); + assert_ne!(result.tip_a, result.tip_d); + + // Tenure D builds off of Tenure B + assert_eq!( + result.tip_d.stacks_block_height, + result.tip_b.stacks_block_height + 1, + ); + assert_eq!( + result.mined_d.parent_block_id, + result.tip_b.index_block_hash().to_string() + ); +} + +#[test] +#[ignore] +fn forked_tenure_okay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let result = forked_tenure_testing(Duration::from_secs(360), Duration::from_secs(0), true); + + assert_ne!(result.tip_b, result.tip_a); + assert_ne!(result.tip_b, result.tip_c); + assert_ne!(result.tip_c, result.tip_a); + + // Block B was built atop block A +
assert_eq!( + result.tip_b.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_b.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + result.tip_c.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_c.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + let tenure_c_2 = result.tip_c_2.unwrap(); + assert_ne!(result.tip_c, tenure_c_2); + assert_ne!(tenure_c_2, result.tip_d); + assert_ne!(result.tip_c, result.tip_d); + + // Second block of tenure C builds off of block C + assert_eq!( + tenure_c_2.stacks_block_height, + result.tip_c.stacks_block_height + 1, + ); + assert_eq!( + result.mined_c_2.unwrap().parent_block_id, + result.tip_c.index_block_hash().to_string() + ); + + // Tenure D builds off of the second block of tenure C + assert_eq!( + result.tip_d.stacks_block_height, + tenure_c_2.stacks_block_height + 1, + ); + assert_eq!( + result.mined_d.parent_block_id, + tenure_c_2.index_block_hash().to_string() + ); +} + +struct TenureForkingResult { + tip_a: StacksHeaderInfo, + tip_b: StacksHeaderInfo, + tip_c: StacksHeaderInfo, + tip_c_2: Option<StacksHeaderInfo>, + tip_d: StacksHeaderInfo, + mined_b: MinedNakamotoBlockEvent, + mined_c: MinedNakamotoBlockEvent, + mined_c_2: Option<MinedNakamotoBlockEvent>, + mined_d: MinedNakamotoBlockEvent, +} + +#[test] +#[ignore] +/// Test to make sure that the signers are capable of reloading their reward set +/// if the stacks-node doesn't have it available at the first block of a prepare phase (e.g., if there was no block) +fn reloads_signer_set_in() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |_| {}, + &[], + ); + + setup_epoch_3_reward_set( + &signer_test.running_nodes.conf, + &signer_test.running_nodes.blocks_processed, + &signer_test.signer_stacks_private_keys, + &signer_test.signer_stacks_private_keys, + &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), + ); + + let naka_conf = &signer_test.running_nodes.conf; + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let before_epoch_3_reward_set_calculation = + epoch_3_reward_cycle_boundary.saturating_sub(prepare_phase_len); + run_until_burnchain_height( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + before_epoch_3_reward_set_calculation, + naka_conf, + ); + + info!("Waiting for signer set calculation."); + let mut reward_set_calculated
= false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = signer_test.get_current_reward_cycle() + 1; + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + while !reward_set_calculated { + let reward_set = signer_test + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + info!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + info!("Signer set calculated"); + + // Manually consume one more block to ensure signers refresh their state + info!("Waiting for signers to initialize."); + next_block_and_wait( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + ); + signer_test.wait_for_registered(30); + info!("Signers initialized"); + + signer_test.run_until_epoch_3_boundary(); + + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + + info!("Waiting 1 burnchain block for miner VRF key confirmation"); + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }, + ) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + signer_test.shutdown(); +} + +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// Miner A mines a regular tenure, its last block being block a_x. +/// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted. +/// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information. +/// This test asserts: +/// * tenure C ignores b_0, and correctly builds off of block a_x. 
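+/// Parameter sketch (inferred from the body below rather than stated explicitly): +/// * `proposal_limit` is installed as the signers' `first_proposal_burn_block_timing`. +/// * `post_btc_block_pause` is slept after tenure B's block is broadcasted, before the next bitcoin block is issued. +/// * `expect_tenure_c` selects whether tenure C is expected to mine blocks (`forked_tenure_okay`) or only propose them (`forked_tenure_invalid`).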
+fn forked_tenure_testing( + proposal_limit: Duration, + post_btc_block_pause: Duration, + expect_tenure_c: bool, +) -> TenureForkingResult { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |config| { + // make the duration long enough that the reorg attempt will definitely be accepted + config.first_proposal_burn_block_timing = proposal_limit; + }, + |_| {}, + &[], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let naka_conf = signer_test.running_nodes.conf.clone(); + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let proposed_blocks = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); + + info!("Starting tenure A."); + // In the next block, the miner should win the tenure and submit a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("Starting tenure B."); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("Commit op is submitted; unpause tenure B's block"); + + // Unpause the broadcast of Tenure B's block, do not submit commits. + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for a stacks block to be broadcasted + let start_time = Instant::now(); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure B broadcasted a block.
Wait {post_btc_block_pause:?}, issue the next bitcoin block, and un-stall block commits."); + thread::sleep(post_btc_block_pause); + let tip_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_b = blocks.last().unwrap().clone(); + + info!("Starting tenure C."); + // Submit a block commit op for tenure C + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = if expect_tenure_c { + mined_blocks.load(Ordering::SeqCst) + } else { + proposed_blocks.load(Ordering::SeqCst) + }; + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = if expect_tenure_c { + mined_blocks.load(Ordering::SeqCst) + } else { + proposed_blocks.load(Ordering::SeqCst) + }; + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + info!("Tenure C produced (or proposed) a block!"); + let tip_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_c = blocks.last().unwrap().clone(); + + let (tip_c_2, mined_c_2) = if !expect_tenure_c { + (None, None) + } else { + // Now let's produce a second block for tenure C and ensure it builds off of block C. + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure C to mine a second block"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure C produced a second block!"); + + let block_2_tenure_c = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_2_c = blocks.last().cloned().unwrap(); + (Some(block_2_tenure_c), Some(block_2_c)) + }; + + info!("Starting tenure D."); + // Submit a block commit op for tenure D and mine a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_d = blocks.last().unwrap().clone(); + signer_test.shutdown(); + TenureForkingResult { + tip_a, + tip_b, + tip_c, + tip_c_2, + tip_d, + mined_b, + mined_c, + mined_c_2, + mined_d, + } +} + +#[test] +#[ignore] +fn bitcoind_forking_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let
num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + ); + let conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let miner_address = Keychain::default(conf.node.seed.clone()) + .origin_address(conf.is_mainnet()) + .unwrap(); + + signer_test.boot_to_epoch_3(); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; + let pre_fork_tenures = 10; + + for _i in 0..pre_fork_tenures { + let _mined_block = signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; + + // each Nakamoto tenure advances the miner's nonce by 2 (tenure-change tx + coinbase) + assert_eq!(pre_fork_1_nonce, pre_epoch_3_nonce + 2 * pre_fork_tenures); + + info!("------------------------- Triggering Bitcoin Fork -------------------------"); + + let burn_block_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let burn_header_hash_to_fork = signer_test + .running_nodes + .btc_regtest_controller + .get_block_hash(burn_block_height); + signer_test + .running_nodes + .btc_regtest_controller + .invalidate_block(&burn_header_hash_to_fork); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + info!("Wait for block off of shallow fork"); + thread::sleep(Duration::from_secs(15)); + + // we need to mine some blocks to get back to being considered a frequent miner + for _i in 0..3 { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst) + > commits_count) + }, + ) + .unwrap(); + } + + let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; + + // the one-block fork orphaned a single tenure's worth (2) of miner transactions + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + + for _i in 0..5 { + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; + assert_eq!(pre_fork_2_nonce, post_fork_1_nonce + 2 * 5); + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("------------------------- Triggering Deeper Bitcoin Fork -------------------------"); + + let burn_block_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let burn_header_hash_to_fork = signer_test + .running_nodes + .btc_regtest_controller + .get_block_hash(burn_block_height - 3); + signer_test + .running_nodes + .btc_regtest_controller + .invalidate_block(&burn_header_hash_to_fork); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(4); + + info!("Wait for block off of shallow fork"); + thread::sleep(Duration::from_secs(15)); + + // we need to mine some blocks to get back to being considered a frequent miner + for _i in 0..3 { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst) + > commits_count) + }, + ) + .unwrap(); + } + + let post_fork_2_nonce =
get_account(&http_origin, &miner_address).nonce; + + assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); + + for _i in 0..5 { + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let test_end_nonce = get_account(&http_origin, &miner_address).nonce; + assert_eq!(test_end_nonce, post_fork_2_nonce + 2 * 5); + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + signer_test.shutdown(); +} + +#[test] +#[ignore] +fn multiple_miners() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; +
conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let max_nakamoto_tenures = 20; + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + let blocks = get_nakamoto_headers(&conf); + // for this test, there should be one block per tenure + let consensus_hash_set: HashSet<_> = blocks + .iter() + .map(|header| header.consensus_hash.clone()) + .collect(); + assert_eq!( + consensus_hash_set.len(), + blocks.len(), + "In this test, there should only be one block per tenure" + ); + miner_1_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); + miner_2_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); + } + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); 
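+ // Height arithmetic sanity check (one stacks block per tenure in this test, as asserted above): + // both nodes should converge on the same tip, which should have advanced by exactly one block + // per bitcoin block mined in the loop.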
+ assert_eq!(peer_1_height, peer_2_height); + assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + signer_test.shutdown(); +} + +/// Read processed nakamoto block IDs from the test observer, and use `config` to open +/// a chainstate DB, returning their corresponding StacksHeaderInfos +fn get_nakamoto_headers(config: &Config) -> Vec<StacksHeaderInfo> { + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + if block_json + .as_object() + .unwrap() + .get("miner_signature") + .is_none() + { + return None; + } + let block_id = StacksBlockId::from_hex( + &block_json + .as_object() + .unwrap() + .get("index_block_hash") + .unwrap() + .as_str() + .unwrap()[2..], + ) + .unwrap(); + Some(block_id) + }) + .collect(); + + let (chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + None, + ) + .unwrap(); + + nakamoto_block_ids + .into_iter() + .map(|block_id| { + NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap() + }) + .collect() +} + +#[test] +#[ignore] +// Test two nakamoto miners, with the signer set split between them. +// One of the miners (run-loop-2) is prevented from submitting "good" block commits +// using the "commit stall" test flag in combination with "block broadcast stalls". +// (Because RL2 isn't able to RBF its initial commits after the tip is broadcasted). +// This test works by tracking two different scenarios: +// 1. RL2 must win a sortition in which its block commit behavior would lead to a fork. +// 2. After such a sortition, RL1 must win another block. +// The test asserts that every nakamoto sortition either has a successful tenure, or if +// RL2 wins and they would be expected to fork, no blocks are produced. The test asserts +// that every block produced increments the chain length. +fn miner_forking() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // we're deliberately stalling proposals: don't punish this in this test!
+ signer_config.block_proposal_timeout = Duration::from_secs(240); + // make sure that we don't allow forking due to burn block timing + signer_config.first_proposal_burn_block_timing = Duration::from_secs(1); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let Counters { + naka_skip_commit_op, + naka_submitted_commits: second_miner_commits_submitted, + .. 
+ } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + naka_skip_commit_op.0.lock().unwrap().replace(false); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let mut sortitions_seen = Vec::new(); + let run_sortition = || { + info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + naka_skip_commit_op.0.lock().unwrap().replace(false); + + // wait until a commit is submitted by run_loop_2 + wait_for(60, || { + let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > rl2_commits_before) + }) + .unwrap(); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // block (i.e., skip) commits from RL2 -- they will stay blocked until the start of the + // next iteration of this loop. + naka_skip_commit_op.0.lock().unwrap().replace(true); + // ensure RL1 performs an RBF after we unblock block broadcast + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // wait for a block to be processed (or timeout!) + if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { + info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); + return (sort_tip, false); + } + + info!("Nakamoto block processed, waiting for commit from RL1"); + + // wait for a commit from RL1 + wait_for(60, || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > rl1_commits_before) + }) + .unwrap(); + + // sleep for 1 second to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(1)); + (sort_tip, true) + }; + + let mut won_by_miner_2_but_no_tenure = false; + let mut won_by_miner_1_after_tenureless_miner_2 = false; + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + // miner 2 is expected to be valid iff: + // (a) it's the first nakamoto tenure + // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) + let mut expects_miner_2_to_be_valid = true; + + // due to the random nature of mining sortitions, the way this test is structured + // is that it keeps track of two scenarios that we want to cover, and once enough sortitions + // have been produced to cover those scenarios, it stops and checks the results at the end.
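+ // Concretely (matching the two flags below): `won_by_miner_2_but_no_tenure` records a sortition + // won by RL2 that yields no blocks because its commit pointed at a fork, while + // `won_by_miner_1_after_tenureless_miner_2` records RL1 winning and mining normally immediately + // after such a tenure-less sortition.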
+ while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { + if sortitions_seen.len() >= 20 { + panic!("Produced 20 sortitions, but didn't cover the test scenarios, aborting"); + } + let (sortition_data, had_tenure) = run_sortition(); + sortitions_seen.push((sortition_data.clone(), had_tenure)); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| (header.consensus_hash.clone(), header)) + .collect(); + + if had_tenure { + let header_info = nakamoto_headers + .get(&sortition_data.consensus_hash) + .unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + let mined_by_miner_1 = miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("Block check"; + "height" => header.chain_length, + "consensus_hash" => %header.consensus_hash, + "block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id(), + "mined_by_miner_1?" => mined_by_miner_1, + "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); + if !mined_by_miner_1 { + assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); + } else if won_by_miner_2_but_no_tenure { + // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't + // mine during because they tried to fork. + won_by_miner_1_after_tenureless_miner_2 = true; + } + + // even if it was mined by miner 2, their next block commit should be invalid! + expects_miner_2_to_be_valid = false; + } else { + info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); + assert!(nakamoto_headers + .get(&sortition_data.consensus_hash) + .is_none()); + assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); + won_by_miner_2_but_no_tenure = true; + expects_miner_2_to_be_valid = true; + } + } + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); + + assert_eq!( + peer_1_height - pre_nakamoto_peer_1_height, + u64::try_from(nakamoto_blocks_count).unwrap(), + "There should be no forks in this test" + ); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks the behavior at the end of a tenure. 
Specifically:
+/// - The miner will broadcast the last block of the tenure, even if the signing is
+/// completed after the next burn block arrives
+/// - The signers will not sign a block that arrives after the next burn block, but
+/// will finish a signing process that was in progress when the next burn block arrived
+fn end_of_tenure() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), send_amt + send_fee)],
+        Some(Duration::from_secs(500)),
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let long_timeout = Duration::from_secs(200);
+    let short_timeout = Duration::from_secs(20);
+
+    signer_test.boot_to_epoch_3();
+    let curr_reward_cycle = signer_test.get_current_reward_cycle();
+    // Advance to one before the next reward cycle to ensure we are on the reward cycle boundary
+    let final_reward_cycle = curr_reward_cycle + 1;
+    let final_reward_cycle_height_boundary = signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .get_burnchain()
+        .reward_cycle_to_block_height(final_reward_cycle)
+        - 2;
+
+    info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------");
+    signer_test.run_until_burnchain_height_nakamoto(
+        long_timeout,
+        final_reward_cycle_height_boundary,
+        num_signers,
+    );
+    println!("Advanced to next reward cycle boundary: {final_reward_cycle_height_boundary}");
+    assert_eq!(
+        signer_test.get_current_reward_cycle(),
+        final_reward_cycle - 1
+    );
+
+    info!("------------------------- Test Block Validation Stalled -------------------------");
+    TEST_VALIDATE_STALL.lock().unwrap().replace(true);
+
+    let proposals_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_proposed
+        .load(Ordering::SeqCst);
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+
+    let info = get_chain_info(&signer_test.running_nodes.conf);
+    let start_height = info.stacks_tip_height;
+    // submit a tx so that the miner will mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    let start_time = Instant::now();
+    while signer_test
+        .running_nodes
+        .nakamoto_blocks_proposed
+        .load(Ordering::SeqCst)
+        <= proposals_before
+    {
+        assert!(
+            start_time.elapsed() <= short_timeout,
+            "Timed out waiting for block proposal"
+        );
+        std::thread::sleep(Duration::from_millis(100));
+    }
+
+    info!("Triggering a new block to be mined");
+
+    // Mine a block into the next reward cycle
+    let commits_before = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        10,
+        || {
+            let commits_count = signer_test
+                .running_nodes
+                .commits_submitted
+                .load(Ordering::SeqCst);
+            Ok(commits_count > commits_before)
+        },
+    )
+    .unwrap();
+
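The boundary arithmetic above (`reward_cycle_to_block_height(final_reward_cycle) - 2`) targets the burn block just shy of the cycle flip. A sketch of that inverse mapping under assumed parameters -- the node derives the real first burn height and cycle length from its PoX constants:

    /// Inverse of reward-cycle numbering: the first burn height of `cycle`.
    /// Sketch only; first_burn_height and cycle_length are assumed inputs.
    fn reward_cycle_to_block_height(first_burn_height: u64, cycle_length: u64, cycle: u64) -> u64 {
        first_burn_height + cycle * cycle_length
    }

    fn main() {
        let (first, len) = (0, 20); // arbitrary example values
        // Two blocks shy of the cycle-2 boundary, as in the test above.
        let boundary = reward_cycle_to_block_height(first, len, 2) - 2;
        assert_eq!(boundary, 38);
    }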
+    // Mine a few blocks so we are well into the next reward cycle
+    for _ in 0..2 {
+        next_block_and(
+            &mut signer_test.running_nodes.btc_regtest_controller,
+            10,
+            || Ok(true),
+        )
+        .unwrap();
+    }
+    assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle);
+
+    while test_observer::get_burn_blocks()
+        .last()
+        .unwrap()
+        .get("burn_block_height")
+        .unwrap()
+        .as_u64()
+        .unwrap()
+        < final_reward_cycle_height_boundary + 1
+    {
+        assert!(
+            start_time.elapsed() <= short_timeout,
+            "Timed out waiting for burn block events"
+        );
+        std::thread::sleep(Duration::from_millis(100));
+    }
+
+    signer_test.wait_for_cycle(30, final_reward_cycle);
+
+    info!("Block proposed and burn blocks consumed. Verifying that stacks block is still not processed");
+
+    assert_eq!(
+        signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst),
+        blocks_before
+    );
+
+    info!("Unpausing block validation and waiting for block to be processed");
+    // Disable the stall and wait for the block to be processed
+    TEST_VALIDATE_STALL.lock().unwrap().replace(false);
+    let start_time = Instant::now();
+    while signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst)
+        <= blocks_before
+    {
+        assert!(
+            start_time.elapsed() <= short_timeout,
+            "Timed out waiting for block to be mined"
+        );
+        std::thread::sleep(Duration::from_millis(100));
+    }
+
+    let info = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(info.stacks_tip_height, start_height + 1);
+
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// This test checks that the miner will retry when signature collection times out.
+fn retry_on_timeout() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), send_amt + send_fee)],
+        Some(Duration::from_secs(5)),
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    signer_test.boot_to_epoch_3();
+
+    signer_test.mine_nakamoto_block(Duration::from_secs(30));
+
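`TEST_VALIDATE_STALL`, toggled just below (and in `end_of_tenure` above), is a process-global switch that the node's validation path consults. As a rough sketch of that `Mutex<Option<bool>>` idiom, with hypothetical names -- the real flag and the code consuming it live in the node itself:

    use std::sync::Mutex;

    // Hypothetical stand-in for a global stall switch like TEST_VALIDATE_STALL.
    // Only the Mutex<Option<bool>> shape the tests toggle is shown here.
    static TEST_STALL: Mutex<Option<bool>> = Mutex::new(None);

    fn set_stall(stalled: bool) {
        // Same call shape as the tests: lock, then Option::replace.
        TEST_STALL.lock().unwrap().replace(stalled);
    }

    fn should_stall() -> bool {
        matches!(*TEST_STALL.lock().unwrap(), Some(true))
    }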
+    // Stall block validation so the signers will not be able to sign.
+    TEST_VALIDATE_STALL.lock().unwrap().replace(true);
+
+    let proposals_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_proposed
+        .load(Ordering::SeqCst);
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+
+    // submit a tx so that the miner will mine a block
+    let sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    loop {
+        let blocks_proposed = signer_test
+            .running_nodes
+            .nakamoto_blocks_proposed
+            .load(Ordering::SeqCst);
+        if blocks_proposed > proposals_before {
+            break;
+        }
+        std::thread::sleep(Duration::from_millis(100));
+    }
+
+    info!("Block proposed, verifying that it is not processed");
+
+    // Wait 10 seconds to be sure that the timeout has occurred
+    std::thread::sleep(Duration::from_secs(10));
+    assert_eq!(
+        signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst),
+        blocks_before
+    );
+
+    // Disable the stall and wait for the block to be processed on retry
+    info!("Disable the stall and wait for the block to be processed");
+    TEST_VALIDATE_STALL.lock().unwrap().replace(false);
+    loop {
+        let blocks_mined = signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst);
+        if blocks_mined > blocks_before {
+            break;
+        }
+        std::thread::sleep(Duration::from_millis(100));
+    }
+
+    signer_test.shutdown();
+}
+
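Both of the tests above lean on shared counters exposed by the harness (`nakamoto_blocks_proposed`, `nakamoto_blocks_mined`, `commits_submitted`). A minimal, std-only illustration of that snapshot-then-poll pattern:

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    fn main() {
        // The harness shares counters such as nakamoto_blocks_mined as atomics.
        let blocks_mined = Arc::new(AtomicU64::new(0));

        // A test snapshots the counter before triggering an action...
        let before = blocks_mined.load(Ordering::SeqCst);

        // ...the node side bumps it when the event happens...
        blocks_mined.fetch_add(1, Ordering::SeqCst);

        // ...and the test polls until the counter moves past the snapshot.
        assert!(blocks_mined.load(Ordering::SeqCst) > before);
    }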
+#[test]
+#[ignore]
+/// This test checks the behavior of signers when a sortition is empty. Specifically:
+/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded.
+/// - The empty sortition will trigger the miner to attempt a tenure extend.
+/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition
+fn empty_sortition() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let block_proposal_timeout = Duration::from_secs(5);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr.clone(), send_amt + send_fee)],
+        Some(Duration::from_secs(5)),
+        |config| {
+            // make the duration long enough that the miner will be marked as malicious
+            config.block_proposal_timeout = block_proposal_timeout;
+        },
+        |_| {},
+        &[],
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = Duration::from_secs(20);
+
+    signer_test.boot_to_epoch_3();
+
+    TEST_BROADCAST_STALL.lock().unwrap().replace(true);
+
+    info!("------------------------- Test Mine Regular Tenure A -------------------------");
+    let commits_before = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+    // Mine a regular tenure
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let commits_count = signer_test
+                .running_nodes
+                .commits_submitted
+                .load(Ordering::SeqCst);
+            Ok(commits_count > commits_before)
+        },
+    )
+    .unwrap();
+
+    info!("------------------------- Test Mine Empty Tenure B -------------------------");
+    info!("Pausing stacks block mining to trigger an empty sortition.");
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+    let commits_before = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+    // Start new Tenure B
+    // In the next block, the miner should win the tenure
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let commits_count = signer_test
+                .running_nodes
+                .commits_submitted
+                .load(Ordering::SeqCst);
+            Ok(commits_count > commits_before)
+        },
+    )
+    .unwrap();
+
+    info!("Pausing stacks block proposal to force an empty tenure");
+    TEST_BROADCAST_STALL.lock().unwrap().replace(true);
+
+    info!("Pausing commit op to prevent tenure C from starting...");
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .0
+        .lock()
+        .unwrap()
+        .replace(true);
+
+    let blocks_after = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+    assert_eq!(blocks_after, blocks_before);
+
+    // submit a tx so that the miner will mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    submit_tx(&http_origin, &transfer_tx);
+
+    std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1)));
+
+    TEST_BROADCAST_STALL.lock().unwrap().replace(false);
+
+    info!("------------------------- Test Delayed Block is Rejected -------------------------");
+    let reward_cycle = signer_test.get_current_reward_cycle();
+    let mut stackerdb = StackerDB::new(
+        &signer_test.running_nodes.conf.node.rpc_bind,
+        StacksPrivateKey::new(), // We are just reading so don't care what the key is
+        false,
+        reward_cycle,
+        SignerSlotID(0), // We are just reading so again, don't care about index.
+    );
+
+    let signer_slot_ids: Vec<_> = signer_test
+        .get_signer_indices(reward_cycle)
+        .iter()
+        .map(|id| id.0)
+        .collect();
+    assert_eq!(signer_slot_ids.len(), num_signers);
+
+    // The miner's proposed block should get rejected by all the signers
+    let mut found_rejections = Vec::new();
+    wait_for(short_timeout.as_secs(), || {
+        for slot_id in signer_slot_ids.iter() {
+            if found_rejections.contains(slot_id) {
+                continue;
+            }
+            let mut latest_msgs = StackerDB::get_messages(
+                stackerdb
+                    .get_session_mut(&MessageSlotID::BlockResponse)
+                    .expect("Failed to get BlockResponse stackerdb session"),
+                &[*slot_id]
+            ).expect("Failed to get message from stackerdb");
+            assert!(latest_msgs.len() <= 1);
+            let Some(latest_msg) = latest_msgs.pop() else {
+                info!("No message yet from slot #{slot_id}, will wait to try again");
+                continue;
+            };
+            if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection {
+                reason_code,
+                ..
+            })) = latest_msg
+            {
+                assert!(matches!(reason_code, RejectCode::SortitionViewMismatch));
+                found_rejections.push(*slot_id);
+            } else {
+                info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection");
+            }
+        }
+        // wait until we've found rejections for all the signers
+        Ok(found_rejections.len() == signer_slot_ids.len())
+    }).unwrap();
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive.
+fn mock_sign_epoch_25() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr.clone(), send_amt + send_fee)],
+        Some(Duration::from_secs(5)),
+        |_| {},
+        |node_config| {
+            let epochs = node_config.burnchain.epochs.as_mut().unwrap();
+            for epoch in epochs.iter_mut() {
+                if epoch.epoch_id == StacksEpochId::Epoch25 {
+                    epoch.end_height = 251;
+                }
+                if epoch.epoch_id == StacksEpochId::Epoch30 {
+                    epoch.start_height = 251;
+                }
+            }
+        },
+        &[],
+    );
+
+    let epochs = signer_test
+        .running_nodes
+        .conf
+        .burnchain
+        .epochs
+        .clone()
+        .unwrap();
+    let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()];
+    let epoch_3_start_height = epoch_3.start_height;
+
+    signer_test.boot_to_epoch_25_reward_cycle();
+
+    info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------");
+
+    // Mine until epoch 3.0 and ensure that no more mock signatures are received
+    let mut reward_cycle = signer_test.get_current_reward_cycle();
+    let mut stackerdb = StackerDB::new(
+        &signer_test.running_nodes.conf.node.rpc_bind,
+        StacksPrivateKey::new(), // We are just reading so don't care what the key is
+        false,
+        reward_cycle,
+        SignerSlotID(0), // We are just reading so again, don't care about index.
+    );
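The loop below re-creates the StackerDB session whenever the burn height crosses a reward cycle boundary, which it approximates as `height % reward_cycle_length == 0`. A sketch of the arithmetic involved, with assumed parameters (the node derives the real values from its burnchain's PoX constants):

    /// Sketch of the reward-cycle arithmetic behind the rollover check below.
    /// first_burn_height is an assumed parameter, not the node's real value.
    fn reward_cycle_of(burn_height: u64, first_burn_height: u64, cycle_length: u64) -> u64 {
        burn_height.saturating_sub(first_burn_height) / cycle_length
    }

    fn main() {
        let cycle_length = 20; // arbitrary example value
        assert_eq!(reward_cycle_of(40, 0, cycle_length), 2);
        // The test's rollover condition approximates the boundary like this:
        assert!(40 % cycle_length == 0);
    }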
+    let mut signer_slot_ids: Vec<_> = signer_test
+        .get_signer_indices(reward_cycle)
+        .iter()
+        .map(|id| id.0)
+        .collect();
+    assert_eq!(signer_slot_ids.len(), num_signers);
+    // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition
+    let main_poll_time = Instant::now();
+    while signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .get_headers_height()
+        < epoch_3_start_height
+    {
+        next_block_and(
+            &mut signer_test.running_nodes.btc_regtest_controller,
+            60,
+            || Ok(true),
+        )
+        .unwrap();
+        let current_burn_block_height = signer_test
+            .running_nodes
+            .btc_regtest_controller
+            .get_headers_height();
+        if current_burn_block_height
+            % signer_test
+                .running_nodes
+                .conf
+                .get_burnchain()
+                .pox_constants
+                .reward_cycle_length as u64
+            == 0
+        {
+            reward_cycle += 1;
+            debug!("Rolling over reward cycle to {:?}", reward_cycle);
+            stackerdb = StackerDB::new(
+                &signer_test.running_nodes.conf.node.rpc_bind,
+                StacksPrivateKey::new(), // We are just reading so don't care what the key is
+                false,
+                reward_cycle,
+                SignerSlotID(0), // We are just reading so again, don't care about index.
+            );
+            signer_slot_ids = signer_test
+                .get_signer_indices(reward_cycle)
+                .iter()
+                .map(|id| id.0)
+                .collect();
+            assert_eq!(signer_slot_ids.len(), num_signers);
+        }
+        let mut mock_signatures = vec![];
+        let mock_poll_time = Instant::now();
+        debug!("Waiting for mock signatures for burn block height {current_burn_block_height}");
+        while mock_signatures.len() != num_signers {
+            std::thread::sleep(Duration::from_millis(100));
+            let messages: Vec<SignerMessage> = StackerDB::get_messages(
+                stackerdb
+                    .get_session_mut(&MessageSlotID::MockSignature)
+                    .expect("Failed to get MockSignature stackerdb session"),
+                &signer_slot_ids,
+            )
+            .expect("Failed to get message from stackerdb");
+            for message in messages {
+                if let SignerMessage::MockSignature(mock_signature) = message {
+                    if mock_signature.sign_data.event_burn_block_height == current_burn_block_height
+                    {
+                        if !mock_signatures.contains(&mock_signature) {
+                            mock_signatures.push(mock_signature);
+                        }
+                    }
+                }
+            }
+            assert!(
+                mock_poll_time.elapsed() <= Duration::from_secs(15),
+                "Failed to find mock signatures within timeout"
+            );
+        }
+        assert!(
+            main_poll_time.elapsed() <= Duration::from_secs(45),
+            "Timed out waiting to advance epoch 3.0"
+        );
+    }
+
+    info!("------------------------- Test Processing Epoch 3.0 Tenure -------------------------");
+    let old_messages: Vec<SignerMessage> = StackerDB::get_messages(
+        stackerdb
+            .get_session_mut(&MessageSlotID::MockSignature)
+            .expect("Failed to get MockSignature stackerdb session"),
+        &signer_slot_ids,
+    )
+    .expect("Failed to get message from stackerdb");
+    let old_signatures = old_messages
+        .iter()
+        .filter_map(|message| {
+            if let SignerMessage::MockSignature(mock_signature) = message {
+                Some(mock_signature)
+            } else {
+                None
+            }
+        })
+        .collect::<Vec<_>>();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(true),
+    )
+    .unwrap();
+    // Wait a bit to ensure no new mock signatures show up
+    std::thread::sleep(Duration::from_secs(5));
+    let new_messages: Vec<SignerMessage> = StackerDB::get_messages(
+        stackerdb
+            .get_session_mut(&MessageSlotID::MockSignature)
+            .expect("Failed to get MockSignature stackerdb session"),
+        &signer_slot_ids,
+    )
+    .expect("Failed to get message from stackerdb");
+    let new_signatures = new_messages
+        .iter()
+        .filter_map(|message| {
+            if let SignerMessage::MockSignature(mock_signature) = message {
+                Some(mock_signature)
+            } else {
+                None
+            }
+        })
+        .collect::<Vec<_>>();
+    assert_eq!(old_signatures, new_signatures);
+}
+
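The `filter_map` used twice above is the standard way these tests pull a single variant out of a mixed StackerDB message list. A self-contained miniature of the same pattern (enum simplified; the real type is libsigner's `SignerMessage`):

    // Enum simplified; the real type is libsigner's SignerMessage.
    enum Message {
        MockSignature(u64), // payload reduced to a burn height for this sketch
        Other,
    }

    /// Pull every MockSignature payload out of a mixed message list, using
    /// the same filter_map shape as the test above.
    fn mock_signature_heights(messages: &[Message]) -> Vec<u64> {
        messages
            .iter()
            .filter_map(|message| match message {
                Message::MockSignature(height) => Some(*height),
                Message::Other => None,
            })
            .collect()
    }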
+#[test]
+#[ignore]
+/// This test asserts that signer set rollover works as expected.
+/// Specifically, if a new set of signers are registered for an upcoming reward cycle,
+/// old signers shut down operation and the new signers take over with the commencement of
+/// the next reward cycle.
+fn signer_set_rollover() {
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let new_num_signers = 4;
+
+    let new_signer_private_keys: Vec<_> = (0..new_num_signers)
+        .into_iter()
+        .map(|_| StacksPrivateKey::new())
+        .collect();
+    let new_signer_public_keys: Vec<_> = new_signer_private_keys
+        .iter()
+        .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed())
+        .collect();
+    let new_signer_addresses: Vec<_> = new_signer_private_keys
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect();
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    let mut initial_balances = new_signer_addresses
+        .iter()
+        .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE))
+        .collect::<Vec<_>>();
+
+    initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4));
+
+    let run_stamp = rand::random();
+    let mut rng = rand::thread_rng();
+
+    let mut buf = [0u8; 2];
+    rng.fill_bytes(&mut buf);
+    let rpc_port = u16::from_be_bytes(buf.try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534
+    let rpc_bind = format!("127.0.0.1:{}", rpc_port);
+
+    // Setup the new signers that will take over
+    let new_signer_configs = build_signer_config_tomls(
+        &new_signer_private_keys,
+        &rpc_bind,
+        Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds.
+        &Network::Testnet,
+        "12345",
+        run_stamp,
+        3000 + num_signers,
+        Some(100_000),
+        None,
+        Some(9000 + num_signers),
+    );
+
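The port selection above maps a random `u16` into the non-privileged range. A quick std-only check of that arithmetic:

    fn non_privileged_port(random: u16) -> u16 {
        // saturating_add(1025) - 1 keeps the result in 1024..=65534:
        // 0 maps to 1024, and anything that saturates at 65535 maps to 65534.
        random.saturating_add(1025) - 1
    }

    fn main() {
        assert_eq!(non_privileged_port(0), 1024);
        assert_eq!(non_privileged_port(u16::MAX), 65534);
    }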
+    let new_spawned_signers: Vec<_> = (0..new_num_signers)
+        .into_iter()
+        .map(|i| {
+            info!("spawning signer");
+            let signer_config =
+                SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap();
+            SpawnedSigner::new(signer_config)
+        })
+        .collect();
+
+    // Boot with some initial signer set
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        initial_balances,
+        None,
+        |_| {},
+        |naka_conf| {
+            for toml in new_signer_configs.clone() {
+                let signer_config = SignerConfig::load_from_str(&toml).unwrap();
+                info!(
+                    "---- Adding signer endpoint to naka conf ({}) ----",
+                    signer_config.endpoint
+                );
+
+                naka_conf.events_observers.insert(EventObserverConfig {
+                    endpoint: format!("{}", signer_config.endpoint),
+                    events_keys: vec![
+                        EventKeyType::StackerDBChunks,
+                        EventKeyType::BlockProposal,
+                        EventKeyType::BurnchainBlocks,
+                    ],
+                });
+            }
+            naka_conf.node.rpc_bind = rpc_bind.clone();
+        },
+        &[],
+    );
+    assert_eq!(
+        new_spawned_signers[0].config.node_host,
+        signer_test.running_nodes.conf.node.rpc_bind
+    );
+    // Only stack for one cycle so that the signer set changes
+    signer_test.num_stacking_cycles = 1_u64;
+
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = Duration::from_secs(20);
+
+    // Verify that naka_conf has our new signer's event observers
+    for toml in &new_signer_configs {
+        let signer_config = SignerConfig::load_from_str(&toml).unwrap();
+        let endpoint = format!("{}", signer_config.endpoint);
+        assert!(signer_test
+            .running_nodes
+            .conf
+            .events_observers
+            .iter()
+            .any(|observer| observer.endpoint == endpoint));
+    }
+
+    // Advance to the first reward cycle, stacking to the old signers beforehand
+
+    info!("---- Booting to epoch 3 -----");
+    signer_test.boot_to_epoch_3();
+
+    // verify that the first reward cycle has the old signers in the reward set
+    let reward_cycle = signer_test.get_current_reward_cycle();
+    let signer_test_public_keys: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed())
+        .collect();
+
+    info!("---- Verifying that the current signers are the old signers ----");
+    let current_signers = signer_test.get_reward_set_signers(reward_cycle);
+    assert_eq!(current_signers.len(), num_signers as usize);
+    // Verify that the current signers are the same as the old signers
+    for signer in current_signers.iter() {
+        assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec()));
+        assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec()));
+    }
+
+    info!("---- Mining a block to trigger the signer set -----");
+    // submit a tx so that the miner will mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    submit_tx(&http_origin, &transfer_tx);
+    let mined_block = signer_test.mine_nakamoto_block(short_timeout);
+    let block_sighash = mined_block.signer_signature_hash;
+    let signer_signatures = mined_block.signer_signature;
+
+    // verify the mined_block signatures against the OLD signer set
+    for signature in signer_signatures.iter() {
+        let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature)
+            .expect("FATAL: Failed to recover pubkey from block sighash");
+
assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } + + // advance to the next reward cycle, stacking to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); + + info!("---- Stacking new signers -----"); + + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + for stacker_sk in new_signer_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + signer_test.mine_nakamoto_block(short_timeout); + + let next_reward_cycle = reward_cycle.saturating_add(1); + + let next_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_add(1); + + info!("---- Mining to next reward set calculation -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(3), + new_num_signers, + ); + + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + for signer in reward_set.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!( + "---- Mining to the next reward cycle (block {}) -----", + next_cycle_height + ); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height, + new_num_signers, + ); + let new_reward_cycle = signer_test.get_current_reward_cycle(); + assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); + + info!("---- Verifying that the current signers are the new signers ----"); + let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); + assert_eq!(current_signers.len(), new_num_signers as usize); + for signer in current_signers.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to verify new signer set -----"); + let sender_nonce = 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + let mined_block = signer_test.mine_nakamoto_block(short_timeout); + + info!("---- Verifying that the new signers signed the block -----"); + let 
signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the NEW signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(!signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } + + signer_test.shutdown(); + for signer in new_spawned_signers { + assert!(signer.stop().is_none()); + } +} diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer/v1.rs similarity index 60% rename from testnet/stacks-node/src/tests/signer.rs rename to testnet/stacks-node/src/tests/signer/v1.rs index 01024343db4..44bbc572282 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -1,159 +1,69 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . use std::collections::HashSet; use std::net::ToSocketAddrs; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::{Arc, Mutex}; +use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::boot_util::boot_code_id; use clarity::vm::Value; -use libsigner::{ - BlockResponse, MessageSlotID, RejectCode, RunningSigner, Signer, SignerEventReceiver, - SignerMessage, -}; +use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; +use libsigner::BlockProposal; use rand::thread_rng; use rand_core::RngCore; use stacks::burnchains::Txid; -use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use stacks::chainstate::stacks::boot::{ - SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, -}; +use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, ThresholdSignature, TransactionAnchorMode, - TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, - TransactionVersion, + StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; -use stacks::core::StacksEpoch; -use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util_lib::strings::StacksString; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; +use 
stacks_common::consts::CHAIN_ID_TESTNET;
 use stacks_common::types::chainstate::{
     ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash,
 };
-use stacks_common::types::StacksEpochId;
 use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum};
 use stacks_common::util::secp256k1::MessageSignature;
-use stacks_signer::client::{StackerDB, StacksClient};
-use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network};
-use stacks_signer::runloop::RunLoopCommand;
-use stacks_signer::signer::{Command as SignerCommand, SignerSlotID};
+use stacks_signer::client::{SignerSlotID, StacksClient};
+use stacks_signer::runloop::{RunLoopCommand, SignerCommand, SignerResult};
+use stacks_signer::v1::coordinator::CoordinatorSelector;
+use stacks_signer::v1::stackerdb_manager::StackerDBManager;
+use stacks_signer::v1::SpawnedSigner;
 use tracing_subscriber::prelude::*;
 use tracing_subscriber::{fmt, EnvFilter};
 use wsts::curve::point::Point;
 use wsts::curve::scalar::Scalar;
+use wsts::net::Message;
 use wsts::state_machine::OperationResult;
 
-use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance};
+use super::SignerTest;
 use crate::event_dispatcher::MinedNakamotoBlockEvent;
-use crate::neon::Counters;
-use crate::run_loop::boot_nakamoto;
-use crate::tests::bitcoin_regtest::BitcoinCoreController;
 use crate::tests::nakamoto_integrations::{
-    boot_to_epoch_3_reward_set, naka_neon_integration_conf, next_block_and,
-    next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE,
-};
-use crate::tests::neon_integrations::{
-    next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop,
+    boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, next_block_and,
 };
+use crate::tests::neon_integrations::{next_block_and_wait, test_observer};
 use crate::tests::to_addr;
-use crate::{BitcoinRegtestController, BurnchainController};
-
-// Helper struct for holding the btc and stx neon nodes
-#[allow(dead_code)]
-struct RunningNodes {
-    pub btc_regtest_controller: BitcoinRegtestController,
-    pub btcd_controller: BitcoinCoreController,
-    pub run_loop_thread: thread::JoinHandle<()>,
-    pub run_loop_stopper: Arc<AtomicBool>,
-    pub vrfs_submitted: Arc<AtomicU64>,
-    pub commits_submitted: Arc<AtomicU64>,
-    pub blocks_processed: Arc<AtomicU64>,
-    pub coord_channel: Arc<Mutex<CoordinatorChannels>>,
-    pub conf: NeonConfig,
-}
-
-struct SignerTest {
-    // The stx and bitcoin nodes and their run loops
-    pub running_nodes: RunningNodes,
-    // The channels for sending commands to the signers
-    pub signer_cmd_senders: Vec<Sender<RunLoopCommand>>,
-    // The channels for receiving results from the signers
-    pub result_receivers: Vec<Receiver<Vec<OperationResult>>>,
-    // The running signer and its threads
-    pub running_signers: Vec<RunningSigner<SignerEventReceiver, Vec<OperationResult>>>,
-    // the private keys of the signers
-    pub signer_stacks_private_keys: Vec<StacksPrivateKey>,
-    // link to the stacks node
-    pub stacks_client: StacksClient,
-    // Unique number used to isolate files created during the test
-    pub run_stamp: u16,
-}
-
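The struct removed above drove each spawned signer over a dedicated pair of std `mpsc` channels (`signer_cmd_senders` / `result_receivers`); the v1 harness folds both ends into `SpawnedSigner`. A hedged, self-contained sketch of that command/result shape, with stand-in types rather than the real `RunLoopCommand`/`OperationResult`:

    use std::sync::mpsc::{channel, Receiver, Sender};
    use std::thread;

    // Stand-in types; the removed harness sent RunLoopCommand and received
    // Vec<OperationResult> over exactly this kind of channel pair.
    type Command = String;
    type Results = Vec<String>;

    /// Spawn a worker that answers each command with a batch of results,
    /// mirroring the per-signer cmd_send / res_recv pairs above.
    fn spawn_worker() -> (Sender<Command>, Receiver<Results>) {
        let (cmd_send, cmd_recv) = channel::<Command>();
        let (res_send, res_recv) = channel::<Results>();
        thread::spawn(move || {
            for cmd in cmd_recv {
                // A real signer would run DKG or a signing round here.
                let _ = res_send.send(vec![format!("done: {cmd}")]);
            }
        });
        (cmd_send, res_recv)
    }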
-impl SignerTest {
-    fn new(num_signers: usize) -> Self {
-        // Generate Signer Data
-        let signer_stacks_private_keys = (0..num_signers)
-            .map(|_| StacksPrivateKey::new())
-            .collect::<Vec<StacksPrivateKey>>();
-
-        let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
-        // So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life!
-        // That's the kind of thing an idiot would have on his luggage!
-        let password = "12345";
-        naka_conf.connection_options.block_proposal_token = Some(password.to_string());
-
-        let run_stamp = rand::random();
-
-        // Setup the signer and coordinator configurations
-        let signer_configs = build_signer_config_tomls(
-            &signer_stacks_private_keys,
-            &naka_conf.node.rpc_bind,
-            Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds.
-            &Network::Testnet,
-            password,
-            run_stamp,
-            3000,
-        );
-
-        let mut running_signers = Vec::new();
-        let mut signer_cmd_senders = Vec::new();
-        let mut result_receivers = Vec::new();
-        for i in 0..num_signers {
-            let (cmd_send, cmd_recv) = channel();
-            let (res_send, res_recv) = channel();
-            info!("spawn signer");
-            running_signers.push(spawn_signer(
-                &signer_configs[i as usize],
-                cmd_recv,
-                res_send,
-            ));
-            signer_cmd_senders.push(cmd_send);
-            result_receivers.push(res_recv);
-        }
-
-        // Setup the nodes and deploy the contract to it
-        let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs);
-        let config = SignerConfig::load_from_str(&signer_configs[0]).unwrap();
-        let stacks_client = StacksClient::from(&config);
-
-        Self {
-            running_nodes: node,
-            result_receivers,
-            signer_cmd_senders,
-            running_signers,
-            signer_stacks_private_keys,
-            stacks_client,
-            run_stamp,
-        }
-    }
+use crate::BurnchainController;
 
+impl SignerTest<SpawnedSigner> {
     fn boot_to_epoch_3(&mut self, timeout: Duration) -> Point {
         boot_to_epoch_3_reward_set(
@@ -161,6 +71,7 @@ impl SignerTest {
             &self.signer_stacks_private_keys,
             &self.signer_stacks_private_keys,
             &mut self.running_nodes.btc_regtest_controller,
+            Some(self.num_stacking_cycles),
         );
 
         let dkg_vote = self.wait_for_dkg(timeout);
@@ -197,46 +108,6 @@ impl SignerTest {
         set_dkg
     }
 
-    fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 {
-        let prepare_phase_len = self
-            .running_nodes
-            .conf
-            .get_burnchain()
-            .pox_constants
-            .prepare_length as u64;
-        let current_block_height = self
-            .running_nodes
-            .btc_regtest_controller
-            .get_headers_height()
-            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
-        let curr_reward_cycle = self.get_current_reward_cycle();
-        let next_reward_cycle = curr_reward_cycle.saturating_add(1);
-        let next_reward_cycle_height = self
-            .running_nodes
-            .btc_regtest_controller
-            .get_burnchain()
-            .reward_cycle_to_block_height(next_reward_cycle);
-        let next_reward_cycle_reward_set_calculation = next_reward_cycle_height
-            .saturating_sub(prepare_phase_len)
-            .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/
-
-        next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height)
-    }
-
-    fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 {
-        let current_block_height = self
-            .running_nodes
-            .btc_regtest_controller
-            .get_headers_height()
-            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
-        let reward_cycle_height = self
-            .running_nodes
-            .btc_regtest_controller
-            .get_burnchain()
-            .reward_cycle_to_block_height(reward_cycle);
-        reward_cycle_height.saturating_sub(current_block_height)
-    }
-
     // Only call after already past the epoch 3.0 boundary
     fn run_to_dkg(&mut self, timeout: Duration) -> Option<Point> {
         let curr_reward_cycle = self.get_current_reward_cycle();
@@ -334,110 +205,34 @@ impl SignerTest {
     ) -> MinedNakamotoBlockEvent {
         let new_block = self.mine_nakamoto_block(timeout);
         let signer_sighash = 
new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block(&signer_sighash, timeout); + let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); new_block } - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { - let commits_submitted = self.running_nodes.commits_submitted.clone(); - let mined_block_time = Instant::now(); - next_block_and_mine_commit( - &mut self.running_nodes.btc_regtest_controller, - timeout.as_secs(), - &self.running_nodes.coord_channel, - &commits_submitted, - ) - .unwrap(); - - let t_start = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().is_empty() { - assert!( - t_start.elapsed() < timeout, - "Timed out while waiting for mined nakamoto block event" - ); - thread::sleep(Duration::from_secs(1)); - } - let mined_block_elapsed_time = mined_block_time.elapsed(); - info!( - "Nakamoto block mine time elapsed: {:?}", - mined_block_elapsed_time - ); - test_observer::get_mined_nakamoto_blocks().pop().unwrap() - } - - fn wait_for_confirmed_block( - &mut self, - block_signer_sighash: &Sha512Trunc256Sum, - timeout: Duration, - ) -> ThresholdSignature { - let t_start = Instant::now(); - while t_start.elapsed() <= timeout { - let blocks = test_observer::get_blocks(); - if let Some(signature) = blocks.iter().find_map(|block_json| { - let block_obj = block_json.as_object().unwrap(); - let sighash = block_obj - // use the try operator because non-nakamoto blocks - // do not supply this field - .get("signer_signature_hash")? - .as_str() - .unwrap(); - if sighash != &format!("0x{block_signer_sighash}") { - return None; - } - let signer_signature_hex = - block_obj.get("signer_signature").unwrap().as_str().unwrap(); - let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); - let signer_signature = ThresholdSignature::consensus_deserialize( - &mut signer_signature_bytes.as_slice(), - ) - .unwrap(); - Some(signer_signature) - }) { - return signature; - } - thread::sleep(Duration::from_millis(500)); - } - panic!("Timed out while waiting for confirmation of block with signer sighash = {block_signer_sighash}") - } - - fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { - // Wait for the block to show up in the test observer (Don't have to wait long as if we have received a mined block already, - // we know that the signers have already received their block proposal events via their event observers) - let t_start = Instant::now(); - while test_observer::get_proposal_responses().is_empty() { - assert!( - t_start.elapsed() < timeout, - "Timed out while waiting for block proposal event" - ); - thread::sleep(Duration::from_secs(1)); - } - let validate_response = test_observer::get_proposal_responses() - .pop() - .expect("No block proposal"); - match validate_response { - BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, - _ => panic!("Unexpected response"), - } - } - fn wait_for_dkg(&mut self, timeout: Duration) -> Point { debug!("Waiting for DKG..."); let mut key = Point::default(); let dkg_now = Instant::now(); - for recv in self.result_receivers.iter() { + for signer in self.spawned_signers.iter() { let mut aggregate_public_key = None; loop { - let results = recv + let results = signer + .res_recv .recv_timeout(timeout) .expect("failed to recv dkg results"); for result in results { match result { - OperationResult::Dkg(point) => { + 
SignerResult::OperationResult(OperationResult::Dkg(point)) => {
                             info!("Received aggregate_group_key {point}");
                             aggregate_public_key = Some(point);
                         }
+                        SignerResult::OperationResult(other) => {
+                            panic!("{}", operation_panic_message(&other))
+                        }
+                        SignerResult::StatusCheck(state) => {
+                            panic!("Received status check result: {:?}", state);
+                        }
                     }
                 }
                 if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout {
@@ -452,50 +247,6 @@ impl SignerTest {
         key
     }
 
-    fn run_until_epoch_3_boundary(&mut self) {
-        let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap();
-        let epoch_3 =
-            &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()];
-
-        let epoch_30_boundary = epoch_3.start_height - 1;
-        // advance to epoch 3.0 and trigger a sign round (cannot vote on blocks in pre epoch 3.0)
-        run_until_burnchain_height(
-            &mut self.running_nodes.btc_regtest_controller,
-            &self.running_nodes.blocks_processed,
-            epoch_30_boundary,
-            &self.running_nodes.conf,
-        );
-        info!("Advanced to Nakamoto epoch 3.0 boundary {epoch_30_boundary}! Ready to Sign Blocks!");
-    }
-
-    fn get_current_reward_cycle(&self) -> u64 {
-        let block_height = self
-            .running_nodes
-            .btc_regtest_controller
-            .get_headers_height();
-        self.running_nodes
-            .btc_regtest_controller
-            .get_burnchain()
-            .block_height_to_reward_cycle(block_height)
-            .unwrap()
-    }
-
-    fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID {
-        let valid_signer_set =
-            u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX");
-        let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false);
-
-        self.stacks_client
-            .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set)
-            .expect("FATAL: failed to get signer slots from stackerdb")
-            .iter()
-            .position(|(address, _)| address == self.stacks_client.get_signer_address())
-            .map(|pos| {
-                SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX"))
-            })
-            .expect("FATAL: signer not registered")
-    }
-
     fn generate_invalid_transactions(&self) -> Vec<StacksTransaction> {
         let host = self
             .running_nodes
@@ -549,7 +300,7 @@ impl SignerTest {
                 None,
             ),
         };
-        let invalid_contract_address = StacksClient::build_signed_contract_call_transaction(
+        let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction(
             &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)),
             contract_name.clone(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -558,11 +309,10 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
-            10,
         )
         .unwrap();
 
-        let invalid_contract_name = StacksClient::build_signed_contract_call_transaction(
+        let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction(
            &contract_addr,
            "bad-signers-contract-name".into(),
            SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -571,11 +321,10 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
-            10,
         )
         .unwrap();
 
-        let invalid_signers_vote_function = StacksClient::build_signed_contract_call_transaction(
+        let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction(
             &contract_addr,
             contract_name.clone(),
             "some-other-function".into(),
@@ -584,12 +333,11 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
-            10,
         )
         .unwrap();
 
         let invalid_function_arg_signer_index =
-            StacksClient::build_signed_contract_call_transaction(
+            StacksClient::build_unsigned_contract_call_transaction(
                 &contract_addr,
contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -603,11 +351,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_function_arg_key = StacksClient::build_signed_contract_call_transaction( + let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -621,11 +368,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_function_arg_round = StacksClient::build_signed_contract_call_transaction( + let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -639,12 +385,11 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); let invalid_function_arg_reward_cycle = - StacksClient::build_signed_contract_call_transaction( + StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -658,11 +403,10 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 1, - 10, ) .unwrap(); - let invalid_nonce = StacksClient::build_signed_contract_call_transaction( + let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction( &contract_addr, contract_name.clone(), SIGNERS_VOTING_FUNCTION_NAME.into(), @@ -671,7 +415,6 @@ impl SignerTest { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 0, // Old nonce - 10, ) .unwrap(); @@ -682,10 +425,10 @@ impl SignerTest { false, ); let invalid_signer_tx = invalid_stacks_client - .build_vote_for_aggregate_public_key(0, round, point, reward_cycle, None, 0) + .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0) .expect("FATAL: failed to build vote for aggregate public key"); - vec![ + let unsigned_txs = vec![ invalid_nonce, invalid_not_contract_call, invalid_contract_name, @@ -696,194 +439,15 @@ impl SignerTest { invalid_function_arg_round, invalid_function_arg_signer_index, invalid_signer_tx, - ] - } - - /// Kills the signer runloop at index `signer_idx` - /// and returns the private key of the killed signer. - /// - /// # Panics - /// Panics if `signer_idx` is out of bounds - fn stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey { - let running_signer = self.running_signers.remove(signer_idx); - self.signer_cmd_senders.remove(signer_idx); - self.result_receivers.remove(signer_idx); - let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - - running_signer.stop(); - signer_key - } - - /// (Re)starts a new signer runloop with the given private key - fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { - let signer_config = build_signer_config_tomls( - &[signer_private_key], - &self.running_nodes.conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", // It worked sir, we have the combination! -Great, what's the combination? 
-            self.run_stamp,
-            3000 + signer_idx,
-        )
-        .pop()
-        .unwrap();
-
-        let (cmd_send, cmd_recv) = channel();
-        let (res_send, res_recv) = channel();
-
-        info!("Restarting signer");
-        let signer = spawn_signer(&signer_config, cmd_recv, res_send);
-
-        self.result_receivers.insert(signer_idx, res_recv);
-        self.signer_cmd_senders.insert(signer_idx, cmd_send);
-        self.running_signers.insert(signer_idx, signer);
-    }
-
-    fn shutdown(self) {
-        self.running_nodes
-            .coord_channel
-            .lock()
-            .expect("Mutex poisoned")
-            .stop_chains_coordinator();
-
-        self.running_nodes
-            .run_loop_stopper
-            .store(false, Ordering::SeqCst);
-        // Stop the signers before the node to prevent hanging
-        for signer in self.running_signers {
-            assert!(signer.stop().is_none());
-        }
-        self.running_nodes.run_loop_thread.join().unwrap();
-    }
-}
-
-fn spawn_signer(
-    data: &str,
-    receiver: Receiver<RunLoopCommand>,
-    sender: Sender<Vec<OperationResult>>,
-) -> RunningSigner<SignerEventReceiver, Vec<OperationResult>> {
-    let config = SignerConfig::load_from_str(data).unwrap();
-    let ev = SignerEventReceiver::new(config.network.is_mainnet());
-    let endpoint = config.endpoint;
-    let runloop: stacks_signer::runloop::RunLoop = stacks_signer::runloop::RunLoop::from(config);
-    let mut signer: Signer<
-        RunLoopCommand,
-        Vec<OperationResult>,
-        stacks_signer::runloop::RunLoop,
-        SignerEventReceiver,
-    > = Signer::new(runloop, ev, receiver, sender);
-    info!("Spawning signer on endpoint {}", endpoint);
-    signer.spawn(endpoint).unwrap()
-}
-
-fn setup_stx_btc_node(
-    mut naka_conf: NeonConfig,
-    signer_stacks_private_keys: &[StacksPrivateKey],
-    signer_config_tomls: &[String],
-) -> RunningNodes {
-    // Spawn the endpoints for observing signers
-    for toml in signer_config_tomls {
-        let signer_config = SignerConfig::load_from_str(toml).unwrap();
-
-        naka_conf.events_observers.insert(EventObserverConfig {
-            endpoint: format!("{}", signer_config.endpoint),
-            events_keys: vec![
-                EventKeyType::StackerDBChunks,
-                EventKeyType::BlockProposal,
-                EventKeyType::BurnchainBlocks,
-            ],
-        });
-    }
-
-    // Spawn a test observer for verification purposes
-    test_observer::spawn();
-    let observer_port = test_observer::EVENT_OBSERVER_PORT;
-    naka_conf.events_observers.insert(EventObserverConfig {
-        endpoint: format!("localhost:{observer_port}"),
-        events_keys: vec![
-            EventKeyType::StackerDBChunks,
-            EventKeyType::BlockProposal,
-            EventKeyType::MinedBlocks,
-        ],
-    });
-
-    // The signers need some initial balances in order to pay for epoch 2.5 transaction votes
-    let mut initial_balances = Vec::new();
-
-    // TODO: separate keys for stacking and signing (because they'll be different in prod)
-    for key in signer_stacks_private_keys {
-        initial_balances.push(InitialBalance {
-            address: to_addr(key).into(),
-            amount: POX_4_DEFAULT_STACKER_BALANCE,
-        });
-    }
-    naka_conf.initial_balances.append(&mut initial_balances);
-    naka_conf.node.stacker = true;
-    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000);
-
-    for signer_set in 0..2 {
-        for message_id in 0..SIGNER_SLOTS_PER_USER {
-            let contract_id =
-                NakamotoSigners::make_signers_db_contract_id(signer_set, message_id, false);
-            if !naka_conf.node.stacker_dbs.contains(&contract_id) {
-                debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. 
Forcibly subscribing..."); - naka_conf.node.stacker_dbs.push(contract_id); - } - } - } - info!("Make new BitcoinCoreController"); - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - info!("Make new BitcoinRegtestController"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - - info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); - - info!("Chain bootstrapped..."); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_submitted_vrfs: vrfs_submitted, - naka_submitted_commits: commits_submitted, - .. - } = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - - // Give the run loop some time to start up! - info!("Wait for runloop..."); - wait_for_runloop(&blocks_processed); - - // First block wakes up the run loop. - info!("Mine first block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Second block will hold our VRF registration. - info!("Mine second block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Third block will be the first mined Stacks block. - info!("Mine third block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - RunningNodes { - btcd_controller, - btc_regtest_controller, - run_loop_thread, - run_loop_stopper, - vrfs_submitted: vrfs_submitted.0, - commits_submitted: commits_submitted.0, - blocks_processed: blocks_processed.0, - coord_channel, - conf: naka_conf, + ]; + unsigned_txs + .into_iter() + .map(|unsigned| { + invalid_stacks_client + .sign_transaction(unsigned) + .expect("Failed to sign transaction") + }) + .collect() } } @@ -910,7 +474,7 @@ fn operation_panic_message(result: &OperationResult) -> String { #[test] #[ignore] /// Test the signer can respond to external commands to perform DKG -fn stackerdb_dkg() { +fn dkg() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -922,7 +486,7 @@ fn stackerdb_dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10); + let mut signer_test = SignerTest::new(10, vec![], None); info!("Boot to epoch 3.0 reward calculation..."); boot_to_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -930,6 +494,7 @@ fn stackerdb_dkg() { &signer_test.signer_stacks_private_keys, &signer_test.signer_stacks_private_keys, &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), ); info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! 
Ready for signers to perform DKG and Sign!");
@@ -942,8 +507,9 @@ fn stackerdb_dkg() {
     // Determine the coordinator of the current node height
     info!("signer_runloop: spawn send commands to do dkg");
     let dkg_now = Instant::now();
-    for sender in signer_test.signer_cmd_senders.iter() {
-        sender
+    for signer in signer_test.spawned_signers.iter() {
+        signer
+            .cmd_send
             .send(RunLoopCommand {
                 reward_cycle,
                 command: SignerCommand::Dkg,
@@ -959,8 +525,8 @@
 
 #[test]
 #[ignore]
-/// Test the signer can respond to external commands to perform DKG
-fn stackerdb_sign() {
+/// Test the signer rejects requests to sign that do not come from a miner
+fn sign_request_rejected() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -981,9 +547,10 @@
         parent_block_id: StacksBlockId([0x05; 32]),
         tx_merkle_root: Sha512Trunc256Sum([0x06; 32]),
         state_index_root: TrieHash([0x07; 32]),
+        timestamp: 8,
         miner_signature: MessageSignature::empty(),
-        signer_signature: ThresholdSignature::empty(),
-        signer_bitvec: BitVec::zeros(1).unwrap(),
+        signer_signature: vec![],
+        pox_treatment: BitVec::zeros(1).unwrap(),
     };
     let mut block1 = NakamotoBlock {
         header: header1,
@@ -1008,9 +575,10 @@
         parent_block_id: StacksBlockId([0x06; 32]),
         tx_merkle_root: Sha512Trunc256Sum([0x07; 32]),
         state_index_root: TrieHash([0x08; 32]),
+        timestamp: 9,
         miner_signature: MessageSignature::empty(),
-        signer_signature: ThresholdSignature::empty(),
-        signer_bitvec: BitVec::zeros(1).unwrap(),
+        signer_signature: vec![],
+        pox_treatment: BitVec::zeros(1).unwrap(),
     };
     let mut block2 = NakamotoBlock {
         header: header2,
@@ -1028,18 +596,28 @@
     block2.header.tx_merkle_root = tx_merkle_root2;
 
     let timeout = Duration::from_secs(200);
-    let mut signer_test = SignerTest::new(10);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(10, vec![], None);
     let _key = signer_test.boot_to_epoch_3(timeout);
 
     info!("------------------------- Test Sign -------------------------");
     let reward_cycle = signer_test.get_current_reward_cycle();
+    let block_proposal_1 = BlockProposal {
+        block: block1.clone(),
+        burn_height: 0,
+        reward_cycle,
+    };
+    let block_proposal_2 = BlockProposal {
+        block: block2.clone(),
+        burn_height: 0,
+        reward_cycle,
+    };
     // Determine the coordinator of the current node height
     info!("signer_runloop: spawn send commands to do sign");
     let sign_now = Instant::now();
     let sign_command = RunLoopCommand {
         reward_cycle,
         command: SignerCommand::Sign {
-            block: block1,
+            block_proposal: block_proposal_1,
             is_taproot: false,
             merkle_root: None,
         },
     };
@@ -1047,16 +625,18 @@
     let sign_taproot_command = RunLoopCommand {
         reward_cycle,
         command: SignerCommand::Sign {
-            block: block2,
+            block_proposal: block_proposal_2,
             is_taproot: true,
             merkle_root: None,
         },
     };
-    for sender in signer_test.signer_cmd_senders.iter() {
-        sender
+    for signer in signer_test.spawned_signers.iter() {
+        signer
+            .cmd_send
             .send(sign_command.clone())
             .expect("failed to send sign command");
-        sender
+        signer
+            .cmd_send
             .send(sign_taproot_command.clone())
             .expect("failed to send sign taproot command");
    }
@@ -1094,6 +674,168 @@
     info!("Sign Time Elapsed: {:.2?}", sign_elapsed);
 }
 
+#[test]
+#[ignore]
+/// Test that a signer can be offline when a DKG round has commenced and
+/// can rejoin the DKG round after it has restarted
+fn delayed_dkg() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let timeout = Duration::from_secs(200);
+    let num_signers = 3;
+    let mut signer_test = SignerTest::new(num_signers, vec![], None);
+    boot_to_epoch_3_reward_set_calculation_boundary(
+        &signer_test.running_nodes.conf,
+        &signer_test.running_nodes.blocks_processed,
+        &signer_test.signer_stacks_private_keys,
+        &signer_test.signer_stacks_private_keys,
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        Some(signer_test.num_stacking_cycles),
+    );
+    let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1);
+    let public_keys = signer_test.get_signer_public_keys(reward_cycle);
+    let coordinator_selector = CoordinatorSelector::from(public_keys);
+    let (_, coordinator_public_key) = coordinator_selector.get_coordinator();
+    let coordinator_public_key =
+        StacksPublicKey::from_slice(coordinator_public_key.to_bytes().as_slice()).unwrap();
+    let signer_slot_ids: Vec<_> = (0..num_signers)
+        .into_iter()
+        .map(|i| SignerSlotID(i as u32))
+        .collect();
+    let mut stackerdbs: Vec<_> = signer_slot_ids
+        .iter()
+        .map(|i| {
+            StackerDBManager::new(
+                &signer_test.running_nodes.conf.node.rpc_bind,
+                StacksPrivateKey::new(), // Doesn't matter what key we use. We are just reading, not writing
+                false,
+                reward_cycle,
+                *i,
+            )
+        })
+        .collect();
+    info!("------------------------- Stop Signers -------------------------");
+    let mut to_stop = None;
+    for (idx, key) in signer_test.signer_stacks_private_keys.iter().enumerate() {
+        let public_key = StacksPublicKey::from_private(key);
+        if public_key == coordinator_public_key {
+            // Do not stop the coordinator. We want coordinator to start a DKG round
+            continue;
+        }
+        // Only stop one signer
+        to_stop = Some(idx);
+        break;
+    }
+    let signer_idx = to_stop.expect("Failed to find a signer to stop");
+    let signer_key = signer_test.stop_signer(signer_idx);
+    debug!(
+        "Removed signer {signer_idx} with key: {:?}, {}",
+        signer_key,
+        signer_key.to_hex()
+    );
+    info!("------------------------- Start DKG -------------------------");
+    info!("Waiting for DKG to start...");
+    // Advance one more to trigger DKG
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        timeout.as_secs(),
+        || Ok(true),
+    )
+    .expect("Failed to mine bitcoin block");
+    // Do not proceed until we guarantee that DKG was triggered
+    let start_time = Instant::now();
+    loop {
+        let stackerdb = stackerdbs.first_mut().unwrap();
+        let dkg_packets: Vec<_> = stackerdb
+            .get_dkg_packets(&signer_slot_ids)
+            .expect("Failed to get dkg packets");
+        let begin_packets: Vec<_> = dkg_packets
+            .iter()
+            .filter_map(|packet| {
+                if matches!(packet.msg, Message::DkgBegin(_)) {
+                    Some(packet)
+                } else {
+                    None
+                }
+            })
+            .collect();
+        if !begin_packets.is_empty() {
+            break;
+        }
+        assert!(
+            start_time.elapsed() < Duration::from_secs(30),
+            "Timed out waiting for DKG to be triggered"
+        );
+    }
+
+    info!("------------------------- Restart Stopped Signer -------------------------");
+
+    signer_test.restart_signer(signer_idx, signer_key);
+
+    info!("------------------------- Wait for DKG -------------------------");
+    let key = signer_test.wait_for_dkg(timeout);
+    let mut transactions = HashSet::with_capacity(num_signers);
+    let start_time = Instant::now();
+    while transactions.len() < num_signers {
+        for stackerdb in stackerdbs.iter_mut() {
+            let current_transactions = stackerdb
+                .get_current_transactions()
+                .expect("Failed getting current transactions for signer slot id");
+            for tx in current_transactions {
+                transactions.insert(tx.txid());
+            }
+        }
+        assert!(
+            start_time.elapsed() < Duration::from_secs(30),
+            "Failed to retrieve pending vote transactions within timeout"
+        );
+    }
+
+    // Make sure transactions get mined
+    let start_time = Instant::now();
+    while !transactions.is_empty() {
+        assert!(
+            start_time.elapsed() < Duration::from_secs(30),
+            "Failed to mine transactions within timeout"
+        );
+        next_block_and_wait(
+            &mut signer_test.running_nodes.btc_regtest_controller,
+            &signer_test.running_nodes.blocks_processed,
+        );
+        let blocks = test_observer::get_blocks();
+        for block in blocks.iter() {
+            let txs = block.get("transactions").unwrap().as_array().unwrap();
+            for tx in txs.iter() {
+                let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
+                if raw_tx == "0x00" {
+                    continue;
+                }
+                let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+                let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+                transactions.remove(&parsed.txid());
+            }
+        }
+    }
+
+    // Make sure DKG did get set
+    assert_eq!(
+        key,
+        signer_test
+            .stacks_client
+            .get_approved_aggregate_key(reward_cycle)
+            .expect("Failed to get approved aggregate key")
+            .expect("No approved aggregate key found")
+    );
+}
+
 pub fn find_block_response(chunk_events: Vec<StackerDBChunksEvent>) -> Option<SignerMessage> {
     for event in chunk_events.into_iter() {
         if event.contract_id.name.as_str()
@@ -1130,7 +872,7 @@ pub fn find_block_response(chunk_events: Vec<StackerDBChunksEvent>) -> Option<S
-fn stackerdb_block_proposal() {
+fn block_proposal() {
     let num_signers = 5;
-    let mut signer_test = SignerTest::new(num_signers);
-    let timeout = Duration::from_secs(200);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![], None);
+    let timeout = Duration::from_secs(30);
     let short_timeout = Duration::from_secs(30);
     let key = signer_test.boot_to_epoch_3(timeout);
@@ -1154,11 +897,23 @@ fn stackerdb_block_proposal() {
 
     info!("------------------------- Test Block Signed -------------------------");
     // Verify that the signers signed the proposed block
-    let signature = signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, timeout);
+    let signature =
+        signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout);
 
     assert!(signature
         .0
         .verify(&key, proposed_signer_signature_hash.as_bytes()));
+    // Test prometheus metrics response
+    #[cfg(feature = "monitoring_prom")]
+    {
+        let metrics_response = signer_test.get_signer_metrics();
+
+        // Because 5 signers are running in the same process, the prometheus metrics
+        // are incremented once for every signer. This is why we expect the metric to be
+        // `5`, even though there is only one block proposed.
+        let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers);
+        assert!(metrics_response.contains(&expected_result));
+    }
 
     signer_test.shutdown();
 }
@@ -1176,7 +931,7 @@ fn stackerdb_block_proposal() {
 ///
 /// Test Assertion:
 /// Signers can perform DKG and sign blocks across Nakamoto reward cycles.
-fn stackerdb_mine_2_nakamoto_reward_cycles() {
+fn mine_2_nakamoto_reward_cycles() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -1188,7 +943,7 @@ fn stackerdb_mine_2_nakamoto_reward_cycles() {
 
     info!("------------------------- Test Setup -------------------------");
     let nmb_reward_cycles = 2;
-    let mut signer_test = SignerTest::new(5);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(5, vec![], None);
     let timeout = Duration::from_secs(200);
     let first_dkg = signer_test.boot_to_epoch_3(timeout);
     let curr_reward_cycle = signer_test.get_current_reward_cycle();
@@ -1251,7 +1006,7 @@ fn stackerdb_mine_2_nakamoto_reward_cycles() {
 /// Miner proposes a block to the signers containing all expected transactions.
 /// Signers broadcast block approval with a signature back to the waiting miner.
 /// Miner includes the signers' signature in the block and finishes mining it.
-fn stackerdb_filter_bad_transactions() {
+fn filter_bad_transactions() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -1263,7 +1018,7 @@ fn stackerdb_filter_bad_transactions() {
 
     info!("------------------------- Test Setup -------------------------");
     // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block
-    let mut signer_test = SignerTest::new(5);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(5, vec![], None);
     let timeout = Duration::from_secs(200);
     let current_signers_dkg = signer_test.boot_to_epoch_3(timeout);
     let next_signers_dkg = signer_test
@@ -1285,7 +1040,7 @@ fn stackerdb_filter_bad_transactions() {
     let next_reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1);
     // Must submit to the NEXT reward cycle slots as they are the ones looked at by the CURRENT miners
     let signer_index = signer_test.get_signer_index(next_reward_cycle);
-    let mut stackerdb = StackerDB::new(
+    let mut stackerdb = StackerDBManager::new(
         &signer_test.running_nodes.conf.node.rpc_bind,
         signer_private_key,
         false,
@@ -1339,7 +1094,7 @@ fn stackerdb_filter_bad_transactions() {
 ///
 /// Test Assertion:
 /// The signers are able to produce a valid signature after one of them is restarted.
-fn stackerdb_sign_after_signer_reboot() {
+fn sign_after_signer_reboot() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -1350,7 +1105,8 @@ fn stackerdb_sign_after_signer_reboot() {
         .init();
 
     info!("------------------------- Test Setup -------------------------");
-    let mut signer_test = SignerTest::new(3);
+    let num_signers = 3;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![], None);
     let timeout = Duration::from_secs(200);
     let short_timeout = Duration::from_secs(30);
 
@@ -1361,7 +1117,7 @@ fn stackerdb_sign_after_signer_reboot() {
     signer_test.mine_nakamoto_block(timeout);
     let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout);
     let signature =
-        signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout);
+        signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout);
 
     assert!(
         signature.verify(&key, proposed_signer_signature_hash.0.as_slice()),
@@ -1379,10 +1135,19 @@ fn stackerdb_sign_after_signer_reboot() {
 
     info!("------------------------- Test Mine Block after restart -------------------------");
 
-    signer_test.mine_nakamoto_block(timeout);
+    let last_block = signer_test.mine_nakamoto_block(timeout);
     let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout);
     let frost_signature =
-        signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout);
+        signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout);
+
+    // Check that the latest block's bitvec is all 1's
+    assert_eq!(
+        last_block.signer_bitvec,
+        serde_json::to_value(BitVec::<4000>::ones(num_signers as u16).unwrap())
+            .expect("Failed to serialize BitVec")
+            .as_str()
+            .expect("Failed to serialize BitVec")
+    );
 
     assert!(
         frost_signature.verify(&key, proposed_signer_signature_hash.0.as_slice()),