diff --git a/.github/workflows/acir-artifacts.yml b/.github/workflows/acir-artifacts.yml deleted file mode 100644 index 43d9acfdedb..00000000000 --- a/.github/workflows/acir-artifacts.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: Build ACIR artifacts - -on: - pull_request: - push: - branches: - - master - -jobs: - check-artifacts-requested: - name: Check if artifacts should be published - runs-on: ubuntu-22.04 - outputs: - publish: ${{ steps.check.outputs.result }} - - steps: - - name: Check if artifacts should be published - id: check - uses: actions/github-script@v6 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - const { REF_NAME } = process.env; - if (REF_NAME == "master") { - console.log(`publish = true`) - return true; - } - - const labels = context.payload.pull_request.labels.map(label => label.name); - const publish = labels.includes('publish-acir'); - - console.log(`publish = ${publish}`) - return publish; - result-encoding: string - env: - REF_NAME: ${{ github.ref_name }} - - build-nargo: - name: Build nargo binary - if: ${{ needs.check-artifacts-requested.outputs.publish == 'true' }} - runs-on: ubuntu-22.04 - needs: [check-artifacts-requested] - strategy: - matrix: - target: [x86_64-unknown-linux-gnu] - - steps: - - name: Checkout Noir repo - uses: actions/checkout@v4 - - - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.71.1 - - - uses: Swatinem/rust-cache@v2 - with: - key: ${{ matrix.target }} - cache-on-failure: true - save-if: ${{ github.event_name != 'merge_group' }} - - - name: Build Nargo - run: cargo build --package nargo_cli --release - - - name: Package artifacts - run: | - mkdir dist - cp ./target/release/nargo ./dist/nargo - 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz - - - name: Upload artifact - uses: actions/upload-artifact@v3 - with: - name: nargo - path: ./dist/* - retention-days: 3 - - auto-pr-rebuild-script: - name: Rebuild ACIR artifacts - needs: [build-nargo] - runs-on: ubuntu-latest - - steps: - - name: Check out code - uses: actions/checkout@v4 - - - name: Download nargo binary - uses: actions/download-artifact@v3 - with: - name: nargo - path: ./nargo - - - name: Add Nargo to $PATH - run: | - chmod +x ${{ github.workspace }}/nargo/nargo - echo "${{ github.workspace }}/nargo" >> $GITHUB_PATH - - - name: Run rebuild script - working-directory: test_programs - run: | - chmod +x ./rebuild.sh - ./rebuild.sh - - - name: Upload ACIR artifacts - uses: actions/upload-artifact@v3 - with: - name: acir-artifacts - path: ./test_programs/acir_artifacts - retention-days: 10 diff --git a/.github/workflows/publish-acvm.yml b/.github/workflows/publish-acvm.yml index 06876f27c8d..59a104e3f75 100644 --- a/.github/workflows/publish-acvm.yml +++ b/.github/workflows/publish-acvm.yml @@ -51,12 +51,6 @@ jobs: env: CARGO_REGISTRY_TOKEN: ${{ secrets.ACVM_CRATES_IO_TOKEN }} - - name: Publish acvm_stdlib - run: | - cargo publish --package acvm_stdlib - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.ACVM_CRATES_IO_TOKEN }} - - name: Publish brillig_vm run: | cargo publish --package brillig_vm diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fcf630345ac..3b2393eaa8f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,6 +11,7 @@ jobs: outputs: release-pr: ${{ steps.release.outputs.pr }} tag-name: ${{ steps.release.outputs.tag_name }} + pending-release-semver: v${{ steps.release.outputs.major }}.${{steps.release.outputs.minor}}.${{steps.release.outputs.patch}} runs-on: 
ubuntu-latest steps: - name: Run release-please @@ -39,35 +40,9 @@ jobs: run: | ./scripts/update-acvm-workspace-versions.sh - - name: Configure git - run: | - git config user.name kevaundray - git config user.email kevtheappdev@gmail.com - - - name: Commit updates - run: | - git add Cargo.toml - git commit -m 'chore: Update root workspace acvm versions' - git push - - update-lockfile: - name: Update lockfile - needs: [release-please,update-acvm-workspace-package-versions] - if: ${{ needs.release-please.outputs.release-pr }} - runs-on: ubuntu-latest - steps: - - name: Checkout release branch - uses: actions/checkout@v4 - with: - ref: ${{ fromJSON(needs.release-please.outputs.release-pr).headBranchName }} - token: ${{ secrets.NOIR_RELEASES_TOKEN }} - - - name: Setup toolchain - uses: dtolnay/rust-toolchain@1.65.0 - - name: Update lockfile run: | - cargo update --workspace + cargo update --workspace - name: Configure git run: | @@ -76,15 +51,16 @@ jobs: - name: Commit updates run: | - git add Cargo.lock - git commit -m 'chore: Update lockfile' + git add . + git commit -m 'chore: Update root workspace acvm versions and lockfile' git push update-docs: name: Update docs - needs: [release-please, update-lockfile] - if: ${{ needs.release-please.outputs.tag-name }} + needs: [release-please, update-acvm-workspace-package-versions] + if: ${{ needs.release-please.outputs.release-pr }} runs-on: ubuntu-latest + steps: - name: Checkout release branch uses: actions/checkout@v4 @@ -102,7 +78,7 @@ jobs: - name: Cut a new version working-directory: ./docs - run: yarn docusaurus docs:version ${{ needs.release-please.outputs.tag-name }} + run: yarn docusaurus docs:version ${{ needs.release-please.outputs.pending-release-semver }} - name: Configure git run: | @@ -112,7 +88,7 @@ jobs: - name: Commit new documentation version run: | git add . - git commit -m "chore(docs): cut new docs version for tag ${{ needs.release-please.outputs.tag-name }}" + git commit -m "chore(docs): cut new docs version for tag ${{ needs.release-please.outputs.pending-release-semver }}" git push build-binaries: diff --git a/.github/workflows/test-js.yml b/.github/workflows/test-js.yml deleted file mode 100644 index 2f29e027156..00000000000 --- a/.github/workflows/test-js.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Test JS packages - -on: - pull_request: - merge_group: - push: - branches: - - master - -# This will cancel previous runs when a branch or PR is updated -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }} - cancel-in-progress: true - - -jobs: - build: - name: Test JS packages - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - name: Get current date - id: date - run: echo "date=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_STATE - - name: prepare docker images tags - id: prep - run: | - REGISTRY="ghcr.io" - IMG="${REGISTRY}/${{ github.repository }}" - IMAGE=$(echo "$IMG" | tr '[:upper:]' '[:lower:]') - TAGS="${IMAGE}:${{ github.sha }}" - TAGS="${TAGS},${IMAGE}:latest,${IMAGE}:v${{ steps.date.outputs.date }}" - echo ::set-output name=tags::${TAGS} - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Test JS packages - uses: docker/build-push-action@v5 - with: - context: . 
- file: Dockerfile.ci - tags: ${{ steps.prep.outputs.tags }} - target: test-js - cache-from: type=gha - cache-to: type=gha,mode=max \ No newline at end of file diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9e3193c22fb..01f6fb140b1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,4 +1,4 @@ { - ".": "0.20.0", - "acvm-repo": "0.37.0" + ".": "0.22.0", + "acvm-repo": "0.38.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 9abb97f6860..0f3f6b02245 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,82 @@ # Changelog +## [0.22.0](https://github.com/noir-lang/noir/compare/v0.21.0...v0.22.0) (2023-12-18) + + +### ⚠ BREAKING CHANGES + +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) +* Remove backend field from artifacts ([#3819](https://github.com/noir-lang/noir/issues/3819)) +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) + +### Features + +* Add context-centric based API for noir_wasm ([#3798](https://github.com/noir-lang/noir/issues/3798)) ([19155d0](https://github.com/noir-lang/noir/commit/19155d02a1248c85e94f14a2a0bb383a4edeb16f)) + + +### Miscellaneous Chores + +* Remove backend field from artifacts ([#3819](https://github.com/noir-lang/noir/issues/3819)) ([fa1cf5f](https://github.com/noir-lang/noir/commit/fa1cf5f03aa21b001c31ebb9ce405e3c2859bb57)) +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) ([0383100](https://github.com/noir-lang/noir/commit/0383100853a80a5b28b797cdfeae0d271f1b7805)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) ([9e5d0e8](https://github.com/noir-lang/noir/commit/9e5d0e813d61a0bfb5ee68174ed287c5a20f1579)) + +## [0.21.0](https://github.com/noir-lang/noir/compare/v0.20.0...v0.21.0) (2023-12-15) + + +### ⚠ BREAKING CHANGES + +* remove unused `source-resolver` package ([#3791](https://github.com/noir-lang/noir/issues/3791)) +* Make file manager read-only to the compiler ([#3760](https://github.com/noir-lang/noir/issues/3760)) + +### Features + +* Add `prelude.nr` ([#3693](https://github.com/noir-lang/noir/issues/3693)) ([5f0f81f](https://github.com/noir-lang/noir/commit/5f0f81f7f49b021880e0bff648aa6c6d0fede46c)) +* Add some traits to the stdlib ([#3796](https://github.com/noir-lang/noir/issues/3796)) ([8e11352](https://github.com/noir-lang/noir/commit/8e113526a2d78d27ed4e489f16d5604a2aaa18ea)) +* Add support for writing tracing debug info to file ([#3790](https://github.com/noir-lang/noir/issues/3790)) ([98a5004](https://github.com/noir-lang/noir/commit/98a500436a68652a367ccbf77e32f8544aff73bc)) +* Allow passing custom foreign call handlers when creating proofs in NoirJS ([#3764](https://github.com/noir-lang/noir/issues/3764)) ([6076e08](https://github.com/noir-lang/noir/commit/6076e08a0814bb6f3836af3c65a7b40c066b9494)) +* Allow underscores in integer literals ([#3746](https://github.com/noir-lang/noir/issues/3746)) ([2c06a64](https://github.com/noir-lang/noir/commit/2c06a64e502bac6839375c5636d39a172a609a5f)) +* Avoid overflow checks on boolean multiplication ([#3745](https://github.com/noir-lang/noir/issues/3745)) ([9b5b686](https://github.com/noir-lang/noir/commit/9b5b6861c3aa0e154e17598ac9994d3970f0e752)) +* Aztec-packages ([#3754](https://github.com/noir-lang/noir/issues/3754)) ([c043265](https://github.com/noir-lang/noir/commit/c043265e550b59bd4296504826fe15d3ce3e9ad2)) +* Dockerfile 
to test cargo and JS packages ([#3684](https://github.com/noir-lang/noir/issues/3684)) ([513d619](https://github.com/noir-lang/noir/commit/513d6196a0766082a3c88a4050498bae2cfa7e13)) +* Docs landing page with a playground ([#3667](https://github.com/noir-lang/noir/issues/3667)) ([9a95fbe](https://github.com/noir-lang/noir/commit/9a95fbeefb2ecd5a898006530a1e054cd345bfe8)) +* Enhance test information output ([#3696](https://github.com/noir-lang/noir/issues/3696)) ([468fbbc](https://github.com/noir-lang/noir/commit/468fbbca43e33b23bc662bf1d36dcb79830a291c)) +* Implement print without newline ([#3650](https://github.com/noir-lang/noir/issues/3650)) ([9827dfe](https://github.com/noir-lang/noir/commit/9827dfe51118ba55da6da51ab8bf45cffd2ca756)) +* **lsp:** Add goto definition for locals ([#3705](https://github.com/noir-lang/noir/issues/3705)) ([9dd465c](https://github.com/noir-lang/noir/commit/9dd465c23e286481fa9a35632d133901f86d5883)) +* **lsp:** Add goto definition for structs ([#3718](https://github.com/noir-lang/noir/issues/3718)) ([a576c5b](https://github.com/noir-lang/noir/commit/a576c5bba6ab92eb4798715a43475808ac954fba)) +* Optimize out unnecessary truncation instructions ([#3717](https://github.com/noir-lang/noir/issues/3717)) ([c9c72ae](https://github.com/noir-lang/noir/commit/c9c72ae7b80aa9504a082dd083b19d4b80d954c5)) +* Remove experimental feature warning for traits ([#3783](https://github.com/noir-lang/noir/issues/3783)) ([cb52242](https://github.com/noir-lang/noir/commit/cb522429592477c2b0544f3b3026a1a946b0e5b1)) +* Reorganizing docs to fit diataxis framework ([#3711](https://github.com/noir-lang/noir/issues/3711)) ([54a1ed5](https://github.com/noir-lang/noir/commit/54a1ed58c991eefa7ac9304b894c7046c294487b)) +* Simplify explicit equality assertions to assert equality directly ([#3708](https://github.com/noir-lang/noir/issues/3708)) ([2fc46e2](https://github.com/noir-lang/noir/commit/2fc46e2269bba8d9ad6ae5fcea10e64dce9b3745)) +* Speed up transformation of debug messages ([#3815](https://github.com/noir-lang/noir/issues/3815)) ([2a8af1e](https://github.com/noir-lang/noir/commit/2a8af1e4141ffff61547ee1c2837a6392bd5db48)) + + +### Bug Fixes + +* `try_unify` no longer binds types on failure ([#3697](https://github.com/noir-lang/noir/issues/3697)) ([f03e581](https://github.com/noir-lang/noir/commit/f03e5812439bdf9d1aedc69debdc50ba5dba2049)) +* Add missing assertion to test ([#3765](https://github.com/noir-lang/noir/issues/3765)) ([bcbe116](https://github.com/noir-lang/noir/commit/bcbe11613b7205476a49ad0d588b868b4fc43ba1)) +* Add negative integer literals ([#3690](https://github.com/noir-lang/noir/issues/3690)) ([8b3a68f](https://github.com/noir-lang/noir/commit/8b3a68f5286c09e1f612dbcfff3fe41023ab7109)) +* Allow trait method references from the trait name ([#3774](https://github.com/noir-lang/noir/issues/3774)) ([cfa34d4](https://github.com/noir-lang/noir/commit/cfa34d4d913dbd35f8329430e0d58830e069d6ff)) +* Deserialize odd length hex literals ([#3747](https://github.com/noir-lang/noir/issues/3747)) ([4000fb2](https://github.com/noir-lang/noir/commit/4000fb279221eb07187d657bfaa7f1c7b311abf2)) +* **docs:** Trigger `update-docs` workflow when the `release-please` PR gets merged and not on every merge to master ([#3677](https://github.com/noir-lang/noir/issues/3677)) ([9a3d1d2](https://github.com/noir-lang/noir/commit/9a3d1d2cf647cd583344f8da122fed1acbca9397)) +* Initialise strings as u8 array ([#3682](https://github.com/noir-lang/noir/issues/3682)) 
([8da40b7](https://github.com/noir-lang/noir/commit/8da40b75a36ebac51d5377311db3c55fa339dcac)) +* **lsp:** Package resolution on save ([#3794](https://github.com/noir-lang/noir/issues/3794)) ([14f2fff](https://github.com/noir-lang/noir/commit/14f2fffeb3de5f653c11694ee3c5e5d62aaa34ec)) +* Parse negative integer literals ([#3698](https://github.com/noir-lang/noir/issues/3698)) ([463ab06](https://github.com/noir-lang/noir/commit/463ab060075db1915127c3f6cef11bfed9d40109)) +* Pub is required on return for entry points ([#3616](https://github.com/noir-lang/noir/issues/3616)) ([7f1d796](https://github.com/noir-lang/noir/commit/7f1d7968368734e02b152e2e907dc7af9e1604c8)) +* Remove `noirc_driver/aztec` feature flag in docker ([#3784](https://github.com/noir-lang/noir/issues/3784)) ([a48d562](https://github.com/noir-lang/noir/commit/a48d562b59aa2009a9c9b65dd71e11cdd8d06cf0)) +* Remove include-keys option ([#3692](https://github.com/noir-lang/noir/issues/3692)) ([95d7ce2](https://github.com/noir-lang/noir/commit/95d7ce21016e3603bf279efb970536ad32d89a3a)) +* Revert chnage to modify version in workspace file for acvm dependencies ([#3673](https://github.com/noir-lang/noir/issues/3673)) ([0696f75](https://github.com/noir-lang/noir/commit/0696f755364293bcc7ebc7a0def0dcafede2e543)) +* Sequence update-lockfile workflow so it gets modified after the ACVM version in the root has been changed ([#3676](https://github.com/noir-lang/noir/issues/3676)) ([c00cd85](https://github.com/noir-lang/noir/commit/c00cd8537836f8e4d8559b01d16dfdd1b5cad519)) +* **ssa:** Handle array arguments to side effectual constrain statements ([#3740](https://github.com/noir-lang/noir/issues/3740)) ([028d65e](https://github.com/noir-lang/noir/commit/028d65ea71f9c11e69784d06e0f9768668455f83)) +* Stop cloning Traits! 
([#3736](https://github.com/noir-lang/noir/issues/3736)) ([fcff412](https://github.com/noir-lang/noir/commit/fcff412bb39a04a5c88506ae5a5ee2fbdefd93ef)) +* Stop issuing unused variable warnings for variables in trait definitions ([#3797](https://github.com/noir-lang/noir/issues/3797)) ([0bb44c3](https://github.com/noir-lang/noir/commit/0bb44c3bbc63d385d77d93da6abd07214bcfd700)) +* Unsigned integers cannot be negated ([#3688](https://github.com/noir-lang/noir/issues/3688)) ([f904ae1](https://github.com/noir-lang/noir/commit/f904ae1065af74652b2111ea17b72f994de37472)) + + +### Miscellaneous Chores + +* Make file manager read-only to the compiler ([#3760](https://github.com/noir-lang/noir/issues/3760)) ([e3dcc21](https://github.com/noir-lang/noir/commit/e3dcc21cb2c0fef7f28f50b018747c4f09609b11)) +* Remove unused `source-resolver` package ([#3791](https://github.com/noir-lang/noir/issues/3791)) ([57d2505](https://github.com/noir-lang/noir/commit/57d2505d53e2233becd1e2a7de882c4acb518eff)) + ## [0.20.0](https://github.com/noir-lang/noir/compare/v0.19.5...v0.20.0) (2023-12-01) diff --git a/Cargo.lock b/Cargo.lock index 0ee4fdf1016..7f964cd58ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 3 [[package]] name = "acir" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir_field", "base64", @@ -23,7 +23,7 @@ dependencies = [ [[package]] name = "acir_field" -version = "0.37.0" +version = "0.38.0" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -37,13 +37,13 @@ dependencies = [ [[package]] name = "acvm" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir", "acvm_blackbox_solver", - "acvm_stdlib", "brillig_vm", "indexmap 1.9.3", + "log", "num-bigint", "num-traits", "paste", @@ -54,7 +54,7 @@ dependencies = [ [[package]] name = "acvm_blackbox_solver" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir", "blake2", @@ -67,7 +67,7 @@ dependencies = [ [[package]] name = "acvm_js" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acvm", "barretenberg_blackbox_solver", @@ -86,13 +86,6 @@ dependencies = [ "wasm-logger", ] -[[package]] -name = "acvm_stdlib" -version = "0.37.0" -dependencies = [ - "acir", -] - [[package]] name = "addr2line" version = "0.20.0" @@ -217,7 +210,7 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arena" -version = "0.20.0" +version = "0.22.0" dependencies = [ "generational-arena", ] @@ -426,7 +419,7 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aztec_macros" -version = "0.20.0" +version = "0.22.0" dependencies = [ "iter-extended", "noirc_frontend", @@ -469,7 +462,7 @@ dependencies = [ [[package]] name = "barretenberg_blackbox_solver" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -593,7 +586,7 @@ dependencies = [ [[package]] name = "brillig" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir_field", "serde", @@ -601,7 +594,7 @@ dependencies = [ [[package]] name = "brillig_vm" -version = "0.37.0" +version = "0.38.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -1639,7 +1632,7 @@ dependencies = [ [[package]] name = "fm" -version = "0.20.0" +version = "0.22.0" dependencies = [ "codespan-reporting", "iter-extended", @@ -2215,7 +2208,7 @@ dependencies = [ [[package]] name = "iter-extended" -version = "0.20.0" +version = "0.22.0" [[package]] name = "itertools" @@ -2437,12 +2430,13 @@ checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" 
[[package]] name = "nargo" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "codespan-reporting", "fm", "iter-extended", + "log", "noirc_abi", "noirc_driver", "noirc_errors", @@ -2458,7 +2452,7 @@ dependencies = [ [[package]] name = "nargo_cli" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "assert_cmd", @@ -2511,7 +2505,7 @@ dependencies = [ [[package]] name = "nargo_fmt" -version = "0.20.0" +version = "0.22.0" dependencies = [ "bytecount", "noirc_frontend", @@ -2523,7 +2517,7 @@ dependencies = [ [[package]] name = "nargo_toml" -version = "0.20.0" +version = "0.22.0" dependencies = [ "dirs", "fm", @@ -2572,7 +2566,7 @@ dependencies = [ [[package]] name = "noir_debugger" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "codespan-reporting", @@ -2586,7 +2580,7 @@ dependencies = [ [[package]] name = "noir_lsp" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "async-lsp", @@ -2609,7 +2603,7 @@ dependencies = [ [[package]] name = "noir_wasm" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "build-data", @@ -2630,7 +2624,7 @@ dependencies = [ [[package]] name = "noirc_abi" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "iter-extended", @@ -2647,7 +2641,7 @@ dependencies = [ [[package]] name = "noirc_abi_wasm" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "build-data", @@ -2664,7 +2658,7 @@ dependencies = [ [[package]] name = "noirc_driver" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "aztec_macros", @@ -2673,6 +2667,7 @@ dependencies = [ "fm", "fxhash", "iter-extended", + "log", "noirc_abi", "noirc_errors", "noirc_evaluator", @@ -2683,25 +2678,27 @@ dependencies = [ [[package]] name = "noirc_errors" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "chumsky", "codespan", "codespan-reporting", "fm", + "log", "serde", "serde_with", ] [[package]] name = "noirc_evaluator" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "fxhash", "im", "iter-extended", + "log", "noirc_errors", "noirc_frontend", "num-bigint", @@ -2711,13 +2708,14 @@ dependencies = [ [[package]] name = "noirc_frontend" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "arena", "chumsky", "fm", "iter-extended", + "log", "noirc_errors", "noirc_printable_type", "regex", @@ -2728,12 +2726,13 @@ dependencies = [ "smol_str", "strum", "strum_macros", + "tempfile", "thiserror", ] [[package]] name = "noirc_printable_type" -version = "0.20.0" +version = "0.22.0" dependencies = [ "acvm", "iter-extended", diff --git a/Cargo.toml b/Cargo.toml index 7cd00ee0a60..aaf060552e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,6 @@ members = [ "acvm-repo/acir", "acvm-repo/acvm", "acvm-repo/acvm_js", - "acvm-repo/stdlib", "acvm-repo/brillig", "acvm-repo/brillig_vm", "acvm-repo/blackbox_solver", @@ -39,7 +38,7 @@ resolver = "2" [workspace.package] # x-release-please-start-version -version = "0.20.0" +version = "0.22.0" # x-release-please-end authors = ["The Noir Team "] edition = "2021" @@ -50,14 +49,14 @@ repository = "https://github.com/noir-lang/noir/" [workspace.dependencies] # ACVM workspace dependencies -acir_field = { version = "0.37.0", path = "acvm-repo/acir_field", default-features = false } -acir = { version = "0.37.0", path = "acvm-repo/acir", default-features = false } -acvm = { version = "0.37.0", path = "acvm-repo/acvm" } -stdlib = { version = "0.37.0", package = "acvm_stdlib", path = "acvm-repo/stdlib", default-features = false } -brillig = { version = 
"0.37.0", path = "acvm-repo/brillig", default-features = false } -brillig_vm = { version = "0.37.0", path = "acvm-repo/brillig_vm", default-features = false } -acvm_blackbox_solver = { version = "0.37.0", path = "acvm-repo/blackbox_solver", default-features = false } -barretenberg_blackbox_solver = { version = "0.37.0", path = "acvm-repo/barretenberg_blackbox_solver", default-features = false } +acir_field = { version = "0.38.0", path = "acvm-repo/acir_field", default-features = false } +acir = { version = "0.38.0", path = "acvm-repo/acir", default-features = false } +acvm = { version = "0.38.0", path = "acvm-repo/acvm" } +stdlib = { version = "0.37.1", package = "acvm_stdlib", path = "acvm-repo/stdlib", default-features = false } +brillig = { version = "0.38.0", path = "acvm-repo/brillig", default-features = false } +brillig_vm = { version = "0.38.0", path = "acvm-repo/brillig_vm", default-features = false } +acvm_blackbox_solver = { version = "0.38.0", path = "acvm-repo/blackbox_solver", default-features = false } +barretenberg_blackbox_solver = { version = "0.38.0", path = "acvm-repo/barretenberg_blackbox_solver", default-features = false } # Noir compiler workspace dependencies arena = { path = "compiler/utils/arena" } @@ -121,6 +120,7 @@ num-bigint = "0.4" num-traits = "0.2" similar-asserts = "1.5.0" log = "0.4.17" +tempfile = "3.6.0" tracing = "0.1.40" diff --git a/acvm-repo/CHANGELOG.md b/acvm-repo/CHANGELOG.md index fea0029744b..0bd38fd3307 100644 --- a/acvm-repo/CHANGELOG.md +++ b/acvm-repo/CHANGELOG.md @@ -5,6 +5,43 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.38.0](https://github.com/noir-lang/noir/compare/v0.37.1...v0.38.0) (2023-12-18) + + +### ⚠ BREAKING CHANGES + +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) + +### Features + +* Aztec-packages ([#3754](https://github.com/noir-lang/noir/issues/3754)) ([c043265](https://github.com/noir-lang/noir/commit/c043265e550b59bd4296504826fe15d3ce3e9ad2)) +* Speed up transformation of debug messages ([#3815](https://github.com/noir-lang/noir/issues/3815)) ([2a8af1e](https://github.com/noir-lang/noir/commit/2a8af1e4141ffff61547ee1c2837a6392bd5db48)) + + +### Bug Fixes + +* Deserialize odd length hex literals ([#3747](https://github.com/noir-lang/noir/issues/3747)) ([4000fb2](https://github.com/noir-lang/noir/commit/4000fb279221eb07187d657bfaa7f1c7b311abf2)) + + +### Miscellaneous Chores + +* Remove partial backend feature ([#3805](https://github.com/noir-lang/noir/issues/3805)) ([0383100](https://github.com/noir-lang/noir/commit/0383100853a80a5b28b797cdfeae0d271f1b7805)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) ([9e5d0e8](https://github.com/noir-lang/noir/commit/9e5d0e813d61a0bfb5ee68174ed287c5a20f1579)) + +## [0.37.1](https://github.com/noir-lang/noir/compare/v0.37.0...v0.37.1) (2023-12-15) + + +### Features + +* Aztec-packages ([#3754](https://github.com/noir-lang/noir/issues/3754)) ([c043265](https://github.com/noir-lang/noir/commit/c043265e550b59bd4296504826fe15d3ce3e9ad2)) +* Speed up transformation of debug messages ([#3815](https://github.com/noir-lang/noir/issues/3815)) ([2a8af1e](https://github.com/noir-lang/noir/commit/2a8af1e4141ffff61547ee1c2837a6392bd5db48)) + + +### Bug Fixes + +* Deserialize odd length hex literals ([#3747](https://github.com/noir-lang/noir/issues/3747)) ([4000fb2](https://github.com/noir-lang/noir/commit/4000fb279221eb07187d657bfaa7f1c7b311abf2)) + ## [0.37.0](https://github.com/noir-lang/noir/compare/v0.36.0...v0.37.0) (2023-12-01) diff --git a/acvm-repo/acir/Cargo.toml b/acvm-repo/acir/Cargo.toml index 100ab06aff0..a0877120a58 100644 --- a/acvm-repo/acir/Cargo.toml +++ b/acvm-repo/acir/Cargo.toml @@ -2,7 +2,7 @@ name = "acir" description = "ACIR is the IR that the VM processes, it is analogous to LLVM IR" # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acir/src/circuit/black_box_functions.rs b/acvm-repo/acir/src/circuit/black_box_functions.rs index 9129f44008c..dec60c09077 100644 --- a/acvm-repo/acir/src/circuit/black_box_functions.rs +++ b/acvm-repo/acir/src/circuit/black_box_functions.rs @@ -1,8 +1,5 @@ //! Black box functions are ACIR opcodes which rely on backends implementing support for specialized constraints. //! This makes certain zk-snark unfriendly computations cheaper than if they were implemented in more basic constraints. -//! -//! It is possible to fallback to less efficient implementations written in ACIR in some cases. -//! These are implemented inside the ACVM stdlib. 
use serde::{Deserialize, Serialize}; #[cfg(test)] @@ -64,7 +61,7 @@ impl BlackBoxFunc { BlackBoxFunc::SHA256 => "sha256", BlackBoxFunc::SchnorrVerify => "schnorr_verify", BlackBoxFunc::Blake2s => "blake2s", - BlackBoxFunc::PedersenCommitment => "pedersen", + BlackBoxFunc::PedersenCommitment => "pedersen_commitment", BlackBoxFunc::PedersenHash => "pedersen_hash", BlackBoxFunc::HashToField128Security => "hash_to_field_128_security", BlackBoxFunc::EcdsaSecp256k1 => "ecdsa_secp256k1", @@ -82,7 +79,7 @@ impl BlackBoxFunc { "sha256" => Some(BlackBoxFunc::SHA256), "schnorr_verify" => Some(BlackBoxFunc::SchnorrVerify), "blake2s" => Some(BlackBoxFunc::Blake2s), - "pedersen" => Some(BlackBoxFunc::PedersenCommitment), + "pedersen_commitment" => Some(BlackBoxFunc::PedersenCommitment), "pedersen_hash" => Some(BlackBoxFunc::PedersenHash), "hash_to_field_128_security" => Some(BlackBoxFunc::HashToField128Security), "ecdsa_secp256k1" => Some(BlackBoxFunc::EcdsaSecp256k1), diff --git a/acvm-repo/acir/src/circuit/directives.rs b/acvm-repo/acir/src/circuit/directives.rs index a86eb525c1f..c3a5b055f19 100644 --- a/acvm-repo/acir/src/circuit/directives.rs +++ b/acvm-repo/acir/src/circuit/directives.rs @@ -34,13 +34,3 @@ pub enum Directive { sort_by: Vec, // specify primary index to sort by, then the secondary,... For instance, if tuple is 2 and sort_by is [1,0], then a=[(a0,b0),..] is sorted by bi and then ai. }, } - -impl Directive { - pub fn name(&self) -> &str { - match self { - Directive::Quotient(_) => "quotient", - Directive::ToLeRadix { .. } => "to_le_radix", - Directive::PermutationSort { .. } => "permutation_sort", - } - } -} diff --git a/acvm-repo/acir/src/circuit/opcodes.rs b/acvm-repo/acir/src/circuit/opcodes.rs index dc7f73b47e5..0e15fe3757c 100644 --- a/acvm-repo/acir/src/circuit/opcodes.rs +++ b/acvm-repo/acir/src/circuit/opcodes.rs @@ -33,58 +33,6 @@ pub enum Opcode { }, } -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum UnsupportedMemoryOpcode { - MemoryOp, - MemoryInit, -} - -impl std::fmt::Display for UnsupportedMemoryOpcode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - UnsupportedMemoryOpcode::MemoryOp => write!(f, "MemoryOp"), - UnsupportedMemoryOpcode::MemoryInit => write!(f, "MemoryInit"), - } - } -} - -impl Opcode { - // TODO We can add a domain separator by doing something like: - // TODO concat!("directive:", directive.name) - pub fn name(&self) -> &str { - match self { - Opcode::Arithmetic(_) => "arithmetic", - Opcode::Directive(directive) => directive.name(), - Opcode::BlackBoxFuncCall(g) => g.name(), - Opcode::Brillig(_) => "brillig", - Opcode::MemoryOp { .. } => "mem", - Opcode::MemoryInit { .. } => "init memory block", - } - } - - pub fn unsupported_opcode(&self) -> UnsupportedMemoryOpcode { - match self { - Opcode::MemoryOp { .. } => UnsupportedMemoryOpcode::MemoryOp, - Opcode::MemoryInit { .. 
} => UnsupportedMemoryOpcode::MemoryInit, - Opcode::BlackBoxFuncCall(_) => { - unreachable!("Unsupported Blackbox function should not be reported here") - } - _ => unreachable!("Opcode is supported"), - } - } - - pub fn is_arithmetic(&self) -> bool { - matches!(self, Opcode::Arithmetic(_)) - } - - pub fn arithmetic(self) -> Option { - match self { - Opcode::Arithmetic(expr) => Some(expr), - _ => None, - } - } -} - impl std::fmt::Display for Opcode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/acvm-repo/acir_field/Cargo.toml b/acvm-repo/acir_field/Cargo.toml index 76030bc8863..cedfc66e734 100644 --- a/acvm-repo/acir_field/Cargo.toml +++ b/acvm-repo/acir_field/Cargo.toml @@ -2,7 +2,7 @@ name = "acir_field" description = "The field implementation being used by ACIR." # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm/Cargo.toml b/acvm-repo/acvm/Cargo.toml index 6e4c2d322bb..5fdf44cbd5e 100644 --- a/acvm-repo/acvm/Cargo.toml +++ b/acvm-repo/acvm/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm" description = "The virtual machine that processes ACIR given a backend/proof system." # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true @@ -16,30 +16,26 @@ repository.workspace = true num-bigint.workspace = true num-traits.workspace = true thiserror.workspace = true +log.workspace = true acir.workspace = true -stdlib.workspace = true brillig_vm.workspace = true acvm_blackbox_solver.workspace = true indexmap = "1.7.0" [features] -default = ["bn254", "testing"] +default = ["bn254"] bn254 = [ "acir/bn254", - "stdlib/bn254", "brillig_vm/bn254", "acvm_blackbox_solver/bn254", ] bls12_381 = [ "acir/bls12_381", - "stdlib/bls12_381", "brillig_vm/bls12_381", "acvm_blackbox_solver/bls12_381", ] -testing = ["stdlib/testing", "unstable-fallbacks"] -unstable-fallbacks = [] [dev-dependencies] rand = "0.8.5" diff --git a/acvm-repo/acvm/src/compiler/mod.rs b/acvm-repo/acvm/src/compiler/mod.rs index 4abf94a2e78..ccb043914d6 100644 --- a/acvm-repo/acvm/src/compiler/mod.rs +++ b/acvm-repo/acvm/src/compiler/mod.rs @@ -1,10 +1,8 @@ -use acir::{ - circuit::{opcodes::UnsupportedMemoryOpcode, Circuit, Opcode, OpcodeLocation}, - BlackBoxFunc, -}; -use thiserror::Error; +use std::collections::HashMap; -use crate::Language; +use acir::circuit::{Circuit, OpcodeLocation}; + +use crate::ExpressionWidth; // The various passes that we can use over ACIR mod optimizers; @@ -15,24 +13,26 @@ use optimizers::optimize_internal; pub use transformers::transform; use transformers::transform_internal; -#[derive(PartialEq, Eq, Debug, Error)] -pub enum CompileError { - #[error("The blackbox function {0} is not supported by the backend and acvm does not have a fallback implementation")] - UnsupportedBlackBox(BlackBoxFunc), - #[error("The opcode {0} is not supported by the backend and acvm does not have a fallback implementation")] - UnsupportedMemoryOpcode(UnsupportedMemoryOpcode), -} - /// This module moves and decomposes acir opcodes. The transformation map allows consumers of this module to map /// metadata they had about the opcodes to the new opcode structure generated after the transformation. #[derive(Debug)] pub struct AcirTransformationMap { - /// This is a vector of pointers to the old acir opcodes. The index of the vector is the new opcode index. 
- /// The value of the vector is the old opcode index pointed. - acir_opcode_positions: Vec, + /// Maps the old acir indices to the new acir indices + old_indices_to_new_indices: HashMap>, } impl AcirTransformationMap { + /// Builds a map from a vector of pointers to the old acir opcodes. + /// The index of the vector is the new opcode index. + /// The value of the vector is the old opcode index pointed. + fn new(acir_opcode_positions: Vec) -> Self { + let mut old_indices_to_new_indices = HashMap::with_capacity(acir_opcode_positions.len()); + for (new_index, old_index) in acir_opcode_positions.into_iter().enumerate() { + old_indices_to_new_indices.entry(old_index).or_insert_with(Vec::new).push(new_index); + } + AcirTransformationMap { old_indices_to_new_indices } + } + pub fn new_locations( &self, old_location: OpcodeLocation, @@ -42,16 +42,16 @@ impl AcirTransformationMap { OpcodeLocation::Brillig { acir_index, .. } => acir_index, }; - self.acir_opcode_positions - .iter() - .enumerate() - .filter(move |(_, &old_index)| old_index == old_acir_index) - .map(move |(new_index, _)| match old_location { - OpcodeLocation::Acir(_) => OpcodeLocation::Acir(new_index), - OpcodeLocation::Brillig { brillig_index, .. } => { - OpcodeLocation::Brillig { acir_index: new_index, brillig_index } - } - }) + self.old_indices_to_new_indices.get(&old_acir_index).into_iter().flat_map( + move |new_indices| { + new_indices.iter().map(move |new_index| match old_location { + OpcodeLocation::Acir(_) => OpcodeLocation::Acir(*new_index), + OpcodeLocation::Brillig { brillig_index, .. } => { + OpcodeLocation::Brillig { acir_index: *new_index, brillig_index } + } + }) + }, + ) } } @@ -71,15 +71,16 @@ fn transform_assert_messages( /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] specific optimizations to a [`Circuit`]. pub fn compile( acir: Circuit, - np_language: Language, - is_opcode_supported: impl Fn(&Opcode) -> bool, -) -> Result<(Circuit, AcirTransformationMap), CompileError> { - let (acir, AcirTransformationMap { acir_opcode_positions }) = optimize_internal(acir); + expression_width: ExpressionWidth, +) -> (Circuit, AcirTransformationMap) { + let (acir, acir_opcode_positions) = optimize_internal(acir); + + let (mut acir, acir_opcode_positions) = + transform_internal(acir, expression_width, acir_opcode_positions); - let (mut acir, transformation_map) = - transform_internal(acir, np_language, is_opcode_supported, acir_opcode_positions)?; + let transformation_map = AcirTransformationMap::new(acir_opcode_positions); acir.assert_messages = transform_assert_messages(acir.assert_messages, &transformation_map); - Ok((acir, transformation_map)) + (acir, transformation_map) } diff --git a/acvm-repo/acvm/src/compiler/optimizers/mod.rs b/acvm-repo/acvm/src/compiler/optimizers/mod.rs index 627ddbb4117..85a97c2c7dc 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/mod.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/mod.rs @@ -13,7 +13,9 @@ use super::{transform_assert_messages, AcirTransformationMap}; /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] independent optimizations to a [`Circuit`]. 
pub fn optimize(acir: Circuit) -> (Circuit, AcirTransformationMap) { - let (mut acir, transformation_map) = optimize_internal(acir); + let (mut acir, new_opcode_positions) = optimize_internal(acir); + + let transformation_map = AcirTransformationMap::new(new_opcode_positions); acir.assert_messages = transform_assert_messages(acir.assert_messages, &transformation_map); @@ -21,7 +23,9 @@ pub fn optimize(acir: Circuit) -> (Circuit, AcirTransformationMap) { } /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] independent optimizations to a [`Circuit`]. -pub(super) fn optimize_internal(acir: Circuit) -> (Circuit, AcirTransformationMap) { +pub(super) fn optimize_internal(acir: Circuit) -> (Circuit, Vec) { + log::trace!("Start circuit optimization"); + // General optimizer pass let opcodes: Vec = acir .opcodes @@ -50,7 +54,7 @@ pub(super) fn optimize_internal(acir: Circuit) -> (Circuit, AcirTransformationMa let (acir, acir_opcode_positions) = range_optimizer.replace_redundant_ranges(acir_opcode_positions); - let transformation_map = AcirTransformationMap { acir_opcode_positions }; + log::trace!("Finish circuit optimization"); - (acir, transformation_map) + (acir, acir_opcode_positions) } diff --git a/acvm-repo/acvm/src/compiler/transformers/csat.rs b/acvm-repo/acvm/src/compiler/transformers/csat.rs index 9f89ac4671a..0d1ab87aae5 100644 --- a/acvm-repo/acvm/src/compiler/transformers/csat.rs +++ b/acvm-repo/acvm/src/compiler/transformers/csat.rs @@ -9,7 +9,7 @@ use indexmap::IndexMap; /// A transformer which processes any [`Expression`]s to break them up such that they /// fit within the [`ProofSystemCompiler`][crate::ProofSystemCompiler]'s width. /// -/// This transformer is only used when targetting the [`PLONKCSat`][crate::Language::PLONKCSat] language. +/// This transformer is only used when targeting the [`Bounded`][crate::ExpressionWidth::Bounded] configuration. /// /// This is done by creating intermediate variables to hold partial calculations and then combining them /// to calculate the original expression. diff --git a/acvm-repo/acvm/src/compiler/transformers/fallback.rs b/acvm-repo/acvm/src/compiler/transformers/fallback.rs deleted file mode 100644 index 06dfc84a798..00000000000 --- a/acvm-repo/acvm/src/compiler/transformers/fallback.rs +++ /dev/null @@ -1,158 +0,0 @@ -use super::super::CompileError; -use acir::{ - circuit::{opcodes::BlackBoxFuncCall, Circuit, Opcode}, - native_types::Expression, -}; - -/// The initial transformer to act on a [`Circuit`]. This replaces any unsupported opcodes with -/// fallback implementations consisting of well supported opcodes. -pub(crate) struct FallbackTransformer; - -impl FallbackTransformer { - //ACIR pass which replace unsupported opcodes using arithmetic fallback - pub(crate) fn transform( - acir: Circuit, - is_supported: impl Fn(&Opcode) -> bool, - opcode_positions: Vec, - ) -> Result<(Circuit, Vec), CompileError> { - let mut acir_supported_opcodes = Vec::with_capacity(acir.opcodes.len()); - let mut new_opcode_positions = Vec::with_capacity(opcode_positions.len()); - let mut witness_idx = acir.current_witness_index + 1; - - for (idx, opcode) in acir.opcodes.into_iter().enumerate() { - match &opcode { - Opcode::Arithmetic(_) | Opcode::Directive(_) | Opcode::Brillig(_) => { - // directive, arithmetic expression or blocks are handled by acvm - new_opcode_positions.push(opcode_positions[idx]); - acir_supported_opcodes.push(opcode); - continue; - } - Opcode::MemoryInit { .. } | Opcode::MemoryOp { .. 
} => { - if !is_supported(&opcode) { - return Err(CompileError::UnsupportedMemoryOpcode( - opcode.unsupported_opcode(), - )); - } - new_opcode_positions.push(opcode_positions[idx]); - acir_supported_opcodes.push(opcode); - } - Opcode::BlackBoxFuncCall(bb_func_call) => { - // We know it is an black box function. Now check if it is - // supported by the backend. If it is supported, then we can simply - // collect the opcode - if is_supported(&opcode) { - new_opcode_positions.push(opcode_positions[idx]); - acir_supported_opcodes.push(opcode); - continue; - } else { - // If we get here then we know that this black box function is not supported - // so we need to replace it with a version of the opcode which only uses arithmetic - // expressions - let (updated_witness_index, opcodes_fallback) = - Self::opcode_fallback(bb_func_call, witness_idx)?; - witness_idx = updated_witness_index; - new_opcode_positions - .extend(vec![opcode_positions[idx]; opcodes_fallback.len()]); - acir_supported_opcodes.extend(opcodes_fallback); - } - } - } - } - - Ok(( - Circuit { - current_witness_index: witness_idx - 1, - opcodes: acir_supported_opcodes, - ..acir - }, - new_opcode_positions, - )) - } - - fn opcode_fallback( - gc: &BlackBoxFuncCall, - current_witness_idx: u32, - ) -> Result<(u32, Vec), CompileError> { - let (updated_witness_index, opcodes_fallback) = match gc { - BlackBoxFuncCall::AND { lhs, rhs, output } => { - assert_eq!( - lhs.num_bits, rhs.num_bits, - "number of bits specified for each input must be the same" - ); - stdlib::blackbox_fallbacks::and( - Expression::from(lhs.witness), - Expression::from(rhs.witness), - *output, - lhs.num_bits, - current_witness_idx, - ) - } - BlackBoxFuncCall::XOR { lhs, rhs, output } => { - assert_eq!( - lhs.num_bits, rhs.num_bits, - "number of bits specified for each input must be the same" - ); - stdlib::blackbox_fallbacks::xor( - Expression::from(lhs.witness), - Expression::from(rhs.witness), - *output, - lhs.num_bits, - current_witness_idx, - ) - } - BlackBoxFuncCall::RANGE { input } => { - // Note there are no outputs because range produces no outputs - stdlib::blackbox_fallbacks::range( - Expression::from(input.witness), - input.num_bits, - current_witness_idx, - ) - } - #[cfg(feature = "unstable-fallbacks")] - BlackBoxFuncCall::SHA256 { inputs, outputs } => { - let sha256_inputs = - inputs.iter().map(|input| (input.witness.into(), input.num_bits)).collect(); - stdlib::blackbox_fallbacks::sha256( - sha256_inputs, - outputs.to_vec(), - current_witness_idx, - ) - } - #[cfg(feature = "unstable-fallbacks")] - BlackBoxFuncCall::Blake2s { inputs, outputs } => { - let blake2s_inputs = - inputs.iter().map(|input| (input.witness.into(), input.num_bits)).collect(); - stdlib::blackbox_fallbacks::blake2s( - blake2s_inputs, - outputs.to_vec(), - current_witness_idx, - ) - } - #[cfg(feature = "unstable-fallbacks")] - BlackBoxFuncCall::HashToField128Security { inputs, output } => { - let hash_to_field_inputs = - inputs.iter().map(|input| (input.witness.into(), input.num_bits)).collect(); - stdlib::blackbox_fallbacks::hash_to_field( - hash_to_field_inputs, - *output, - current_witness_idx, - ) - } - #[cfg(feature = "unstable-fallbacks")] - BlackBoxFuncCall::Keccak256 { inputs, outputs } => { - let keccak_inputs = - inputs.iter().map(|input| (input.witness.into(), input.num_bits)).collect(); - stdlib::blackbox_fallbacks::keccak256( - keccak_inputs, - outputs.to_vec(), - current_witness_idx, - ) - } - _ => { - return 
Err(CompileError::UnsupportedBlackBox(gc.get_black_box_func())); - } - }; - - Ok((updated_witness_index, opcodes_fallback)) - } -} diff --git a/acvm-repo/acvm/src/compiler/transformers/mod.rs b/acvm-repo/acvm/src/compiler/transformers/mod.rs index d827b759666..2a3e28c536a 100644 --- a/acvm-repo/acvm/src/compiler/transformers/mod.rs +++ b/acvm-repo/acvm/src/compiler/transformers/mod.rs @@ -5,34 +5,33 @@ use acir::{ }; use indexmap::IndexMap; -use crate::Language; +use crate::ExpressionWidth; mod csat; -mod fallback; mod r1cs; pub(crate) use csat::CSatTransformer; -pub(crate) use fallback::FallbackTransformer; pub(crate) use r1cs::R1CSTransformer; -use super::{transform_assert_messages, AcirTransformationMap, CompileError}; +use super::{transform_assert_messages, AcirTransformationMap}; /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] specific optimizations to a [`Circuit`]. pub fn transform( acir: Circuit, - np_language: Language, - is_opcode_supported: impl Fn(&Opcode) -> bool, -) -> Result<(Circuit, AcirTransformationMap), CompileError> { + expression_width: ExpressionWidth, +) -> (Circuit, AcirTransformationMap) { // Track original acir opcode positions throughout the transformation passes of the compilation // by applying the modifications done to the circuit opcodes and also to the opcode_positions (delete and insert) let acir_opcode_positions = acir.opcodes.iter().enumerate().map(|(i, _)| i).collect(); - let (mut acir, transformation_map) = - transform_internal(acir, np_language, is_opcode_supported, acir_opcode_positions)?; + let (mut acir, acir_opcode_positions) = + transform_internal(acir, expression_width, acir_opcode_positions); + + let transformation_map = AcirTransformationMap::new(acir_opcode_positions); acir.assert_messages = transform_assert_messages(acir.assert_messages, &transformation_map); - Ok((acir, transformation_map)) + (acir, transformation_map) } /// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] specific optimizations to a [`Circuit`]. @@ -40,21 +39,17 @@ pub fn transform( /// Accepts an injected `acir_opcode_positions` to allow transformations to be applied directly after optimizations. 
pub(super) fn transform_internal( acir: Circuit, - np_language: Language, - is_opcode_supported: impl Fn(&Opcode) -> bool, + expression_width: ExpressionWidth, acir_opcode_positions: Vec, -) -> Result<(Circuit, AcirTransformationMap), CompileError> { - // Fallback transformer pass - let (acir, acir_opcode_positions) = - FallbackTransformer::transform(acir, is_opcode_supported, acir_opcode_positions)?; - - let mut transformer = match &np_language { - crate::Language::R1CS => { - let transformation_map = AcirTransformationMap { acir_opcode_positions }; +) -> (Circuit, Vec) { + log::trace!("Start circuit transformation"); + + let mut transformer = match &expression_width { + crate::ExpressionWidth::Unbounded => { let transformer = R1CSTransformer::new(acir); - return Ok((transformer.transform(), transformation_map)); + return (transformer.transform(), acir_opcode_positions); } - crate::Language::PLONKCSat { width } => { + crate::ExpressionWidth::Bounded { width } => { let mut csat = CSatTransformer::new(*width); for value in acir.circuit_arguments() { csat.mark_solvable(value); @@ -214,8 +209,7 @@ pub(super) fn transform_internal( ..acir }; - let transformation_map = - AcirTransformationMap { acir_opcode_positions: new_acir_opcode_positions }; + log::trace!("Finish circuit transformation"); - Ok((acir, transformation_map)) + (acir, new_acir_opcode_positions) } diff --git a/acvm-repo/acvm/src/lib.rs b/acvm-repo/acvm/src/lib.rs index 0ab037a2e4b..626bb2c9b91 100644 --- a/acvm-repo/acvm/src/lib.rs +++ b/acvm-repo/acvm/src/lib.rs @@ -18,10 +18,16 @@ pub use brillig_vm; // re-export blackbox solver pub use acvm_blackbox_solver as blackbox_solver; -/// Supported NP complete languages -/// This might need to be in ACIR instead +/// Specifies the maximum width of the expressions which will be constrained. +/// +/// Unbounded Expressions are useful if you are eventually going to pass the ACIR +/// into a proving system which supports R1CS. +/// +/// Bounded Expressions are useful if you are eventually going to pass the ACIR +/// into a proving system which supports PLONK, where arithmetic expressions have a +/// finite fan-in. #[derive(Debug, Clone, Copy)] -pub enum Language { - R1CS, - PLONKCSat { width: usize }, +pub enum ExpressionWidth { + Unbounded, + Bounded { width: usize }, } diff --git a/acvm-repo/acvm/src/pwg/mod.rs b/acvm-repo/acvm/src/pwg/mod.rs index c1edf60161a..859ad010dcd 100644 --- a/acvm-repo/acvm/src/pwg/mod.rs +++ b/acvm-repo/acvm/src/pwg/mod.rs @@ -11,7 +11,7 @@ use acir::{ use acvm_blackbox_solver::BlackBoxResolutionError; use self::{arithmetic::ArithmeticSolver, directives::solve_directives, memory_op::MemoryOpSolver}; -use crate::{BlackBoxFunctionSolver, Language}; +use crate::BlackBoxFunctionSolver; use thiserror::Error; @@ -104,8 +104,6 @@ impl std::fmt::Display for ErrorLocation { pub enum OpcodeResolutionError { #[error("Cannot solve opcode: {0}")] OpcodeNotSolvable(#[from] OpcodeNotSolvable), - #[error("Backend does not currently support the {0} opcode. 
ACVM does not currently have a fallback for this opcode.")] - UnsupportedBlackBoxFunc(BlackBoxFunc), #[error("Cannot satisfy constraint")] UnsatisfiedConstrain { opcode_location: ErrorLocation }, #[error("Index out of bounds, array has size {array_size:?}, but index was {index:?}")] @@ -122,9 +120,6 @@ impl From for OpcodeResolutionError { BlackBoxResolutionError::Failed(func, reason) => { OpcodeResolutionError::BlackBoxFunctionFailed(func, reason) } - BlackBoxResolutionError::Unsupported(func) => { - OpcodeResolutionError::UnsupportedBlackBoxFunc(func) - } } } } @@ -450,30 +445,3 @@ fn any_witness_from_expression(expr: &Expression) -> Option { Some(expr.linear_combinations[0].1) } } - -#[deprecated( - note = "For backwards compatibility, this method allows you to derive _sensible_ defaults for opcode support based on the np language. \n Backends should simply specify what they support." -)] -// This is set to match the previous functionality that we had -// Where we could deduce what opcodes were supported -// by knowing the np complete language -pub fn default_is_opcode_supported(language: Language) -> fn(&Opcode) -> bool { - // R1CS does not support any of the opcode except Arithmetic by default. - // The compiler will replace those that it can -- ie range, xor, and - fn r1cs_is_supported(opcode: &Opcode) -> bool { - matches!(opcode, Opcode::Arithmetic(_)) - } - - // PLONK supports most of the opcodes by default - // The ones which are not supported, the acvm compiler will - // attempt to transform into supported opcodes. If these are also not available - // then a compiler error will be emitted. - fn plonk_is_supported(_opcode: &Opcode) -> bool { - true - } - - match language { - Language::R1CS => r1cs_is_supported, - Language::PLONKCSat { .. } => plonk_is_supported, - } -} diff --git a/acvm-repo/acvm/tests/stdlib.rs b/acvm-repo/acvm/tests/stdlib.rs deleted file mode 100644 index c96c55f9401..00000000000 --- a/acvm-repo/acvm/tests/stdlib.rs +++ /dev/null @@ -1,354 +0,0 @@ -#![cfg(feature = "testing")] -mod solver; -use crate::solver::StubbedBackend; -use acir::{ - circuit::{ - opcodes::{BlackBoxFuncCall, FunctionInput}, - Circuit, Opcode, - }, - native_types::{Expression, Witness}, - FieldElement, -}; -use acvm::{ - compiler::compile, - pwg::{ACVMStatus, ACVM}, - Language, -}; -use acvm_blackbox_solver::{blake2s, hash_to_field_128_security, keccak256, sha256}; -use paste::paste; -use proptest::prelude::*; -use std::collections::{BTreeMap, BTreeSet}; -use stdlib::blackbox_fallbacks::{UInt32, UInt64, UInt8}; - -test_uint!(test_uint8, UInt8, u8, 8); -test_uint!(test_uint32, UInt32, u32, 32); -test_uint!(test_uint64, UInt64, u64, 64); - -#[macro_export] -macro_rules! test_uint { - ( - $name:tt, - $uint:ident, - $u:ident, - $size:expr - ) => { - paste! { - test_uint_inner!( - [<$name _rol>], - [<$name _ror>], - [<$name _euclidean_division>], - [<$name _add>], - [<$name _sub>], - [<$name _left_shift>], - [<$name _right_shift>], - [<$name _less_than>], - $uint, - $u, - $size - ); - } - }; -} - -#[macro_export] -macro_rules! test_uint_inner { - ( - $rol:tt, - $ror:tt, - $euclidean_division:tt, - $add:tt, - $sub:tt, - $left_shift:tt, - $right_shift:tt, - $less_than:tt, - $uint: ident, - $u: ident, - $size: expr - ) => { - proptest! 
{ - #[test] - fn $rol(x in 0..$u::MAX, y in 0..32_u32) { - let fe = FieldElement::from(x as u128); - let w = Witness(1); - let result = x.rotate_left(y); - let uint = $uint::new(w); - let (w, extra_opcodes, _) = uint.rol(y, 2); - let witness_assignments = BTreeMap::from([(Witness(1), fe)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w.get_inner()).unwrap(), &FieldElement::from(result as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $ror(x in 0..$u::MAX, y in 0..32_u32) { - let fe = FieldElement::from(x as u128); - let w = Witness(1); - let result = x.rotate_right(y); - let uint = $uint::new(w); - let (w, extra_opcodes, _) = uint.ror(y, 2); - let witness_assignments = BTreeMap::from([(Witness(1), fe)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w.get_inner()).unwrap(), &FieldElement::from(result as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $euclidean_division(x in 0..$u::MAX, y in 1 - ..$u::MAX) { - let lhs = FieldElement::from(x as u128); - let rhs = FieldElement::from(y as u128); - let w1 = Witness(1); - let w2 = Witness(2); - let q = x.div_euclid(y); - let r = x.rem_euclid(y); - let u32_1 = $uint::new(w1); - let u32_2 = $uint::new(w2); - let (q_w, r_w, extra_opcodes, _) = $uint::euclidean_division(&u32_1, &u32_2, 3); - let witness_assignments = BTreeMap::from([(Witness(1), lhs),(Witness(2), rhs)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&q_w.get_inner()).unwrap(), &FieldElement::from(q as u128)); - prop_assert_eq!(acvm.witness_map().get(&r_w.get_inner()).unwrap(), &FieldElement::from(r as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $add(x in 0..$u::MAX, y in 0..$u::MAX, z in 0..$u::MAX) { - let lhs = FieldElement::from(x as u128); - let rhs = FieldElement::from(y as u128); - let rhs_z = FieldElement::from(z as u128); - let result = FieldElement::from(((x as u128).wrapping_add(y as u128) % (1_u128 << $size)).wrapping_add(z as u128) % (1_u128 << $size)); - let w1 = Witness(1); - let w2 = Witness(2); - let w3 = Witness(3); - let u32_1 = $uint::new(w1); - let u32_2 = $uint::new(w2); - let u32_3 = $uint::new(w3); - let mut opcodes = Vec::new(); - let (w, extra_opcodes, num_witness) = u32_1.add(&u32_2, 4); - opcodes.extend(extra_opcodes); - let (w2, extra_opcodes, _) = w.add(&u32_3, num_witness); - opcodes.extend(extra_opcodes); - let witness_assignments = BTreeMap::from([(Witness(1), lhs), (Witness(2), rhs), (Witness(3), rhs_z)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w2.get_inner()).unwrap(), &result); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $sub(x in 0..$u::MAX, y in 0..$u::MAX, z in 0..$u::MAX) { - let lhs = FieldElement::from(x as u128); - let rhs = FieldElement::from(y as u128); - let rhs_z = FieldElement::from(z as u128); - let result = FieldElement::from(((x as u128).wrapping_sub(y as u128) % (1_u128 << $size)).wrapping_sub(z as u128) % 
(1_u128 << $size)); - let w1 = Witness(1); - let w2 = Witness(2); - let w3 = Witness(3); - let u32_1 = $uint::new(w1); - let u32_2 = $uint::new(w2); - let u32_3 = $uint::new(w3); - let mut opcodes = Vec::new(); - let (w, extra_opcodes, num_witness) = u32_1.sub(&u32_2, 4); - opcodes.extend(extra_opcodes); - let (w2, extra_opcodes, _) = w.sub(&u32_3, num_witness); - opcodes.extend(extra_opcodes); - let witness_assignments = BTreeMap::from([(Witness(1), lhs), (Witness(2), rhs), (Witness(3), rhs_z)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w2.get_inner()).unwrap(), &result); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $left_shift(x in 0..$u::MAX, y in 0..32_u32) { - let lhs = FieldElement::from(x as u128); - let w1 = Witness(1); - let result = x.overflowing_shl(y).0; - let u32_1 = $uint::new(w1); - let (w, extra_opcodes, _) = u32_1.leftshift(y, 2); - let witness_assignments = BTreeMap::from([(Witness(1), lhs)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w.get_inner()).unwrap(), &FieldElement::from(result as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $right_shift(x in 0..$u::MAX, y in 0..32_u32) { - let lhs = FieldElement::from(x as u128); - let w1 = Witness(1); - let result = x.overflowing_shr(y).0; - let u32_1 = $uint::new(w1); - let (w, extra_opcodes, _) = u32_1.rightshift(y, 2); - let witness_assignments = BTreeMap::from([(Witness(1), lhs)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w.get_inner()).unwrap(), &FieldElement::from(result as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - - #[test] - fn $less_than(x in 0..$u::MAX, y in 0..$u::MAX) { - let lhs = FieldElement::from(x as u128); - let rhs = FieldElement::from(y as u128); - let w1 = Witness(1); - let w2 = Witness(2); - let result = x < y; - let u32_1 = $uint::new(w1); - let u32_2 = $uint::new(w2); - let (w, extra_opcodes, _) = u32_1.less_than_comparison(&u32_2, 3); - let witness_assignments = BTreeMap::from([(Witness(1), lhs), (Witness(2), rhs)]).into(); - let mut acvm = ACVM::new(&StubbedBackend, &extra_opcodes, witness_assignments); - let solver_status = acvm.solve(); - - prop_assert_eq!(acvm.witness_map().get(&w.get_inner()).unwrap(), &FieldElement::from(result as u128)); - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - } - }; -} - -test_hashes!(test_sha256, sha256, SHA256, does_not_support_sha256); -test_hashes!(test_blake2s, blake2s, Blake2s, does_not_support_blake2s); -test_hashes!(test_keccak, keccak256, Keccak256, does_not_support_keccak); - -fn does_not_support_sha256(opcode: &Opcode) -> bool { - !matches!(opcode, Opcode::BlackBoxFuncCall(BlackBoxFuncCall::SHA256 { .. })) -} -fn does_not_support_blake2s(opcode: &Opcode) -> bool { - !matches!(opcode, Opcode::BlackBoxFuncCall(BlackBoxFuncCall::Blake2s { .. })) -} -fn does_not_support_keccak(opcode: &Opcode) -> bool { - !matches!(opcode, Opcode::BlackBoxFuncCall(BlackBoxFuncCall::Keccak256 { .. })) -} - -#[macro_export] -macro_rules! 
test_hashes { - ( - $name:ident, - $hasher:ident, - $opcode:ident, - $opcode_support: ident - ) => { - proptest! { - #![proptest_config(ProptestConfig::with_cases(3))] - #[test] - fn $name(input_values in proptest::collection::vec(0..u8::MAX, 1..50)) { - let mut opcodes = Vec::new(); - let mut witness_assignments = BTreeMap::new(); - let mut input_witnesses: Vec = Vec::new(); - let mut correct_result_witnesses: Vec = Vec::new(); - let mut output_witnesses: Vec = Vec::new(); - - // prepare test data - let mut counter = 0; - let output = $hasher(&input_values).unwrap(); - for inp_v in input_values { - counter += 1; - let function_input = FunctionInput { witness: Witness(counter), num_bits: 8 }; - input_witnesses.push(function_input); - witness_assignments.insert(Witness(counter), FieldElement::from(inp_v as u128)); - } - - for o_v in output { - counter += 1; - correct_result_witnesses.push(Witness(counter)); - witness_assignments.insert(Witness(counter), FieldElement::from(o_v as u128)); - } - - for _ in 0..32 { - counter += 1; - output_witnesses.push(Witness(counter)); - } - let blackbox = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::$opcode { inputs: input_witnesses, outputs: output_witnesses.clone() }); - opcodes.push(blackbox); - - // constrain the output to be the same as the hasher - for i in 0..correct_result_witnesses.len() { - let mut output_constraint = Expression::from(correct_result_witnesses[i]); - output_constraint.push_addition_term(-FieldElement::one(), output_witnesses[i]); - opcodes.push(Opcode::Arithmetic(output_constraint)); - } - - // compile circuit - let circuit = Circuit { - current_witness_index: witness_assignments.len() as u32 + 32, - opcodes, - private_parameters: BTreeSet::new(), // This is not correct but is unused in this test. - ..Circuit::default() - }; - let circuit = compile(circuit, Language::PLONKCSat{ width: 3 }, $opcode_support).unwrap().0; - - // solve witnesses - let mut acvm = ACVM::new(&StubbedBackend, &circuit.opcodes, witness_assignments.into()); - let solver_status = acvm.solve(); - - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } - } - }; -} - -fn does_not_support_hash_to_field(opcode: &Opcode) -> bool { - !matches!(opcode, Opcode::BlackBoxFuncCall(BlackBoxFuncCall::HashToField128Security { .. })) -} - -proptest! 
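// The equality constraint used repeatedly in these tests, pulled out as a sketch
// (constrain_equal is an illustrative name, not an API in this repo): the arithmetic
// opcode encodes expected - actual = 0, so solving can only succeed when both
// witnesses carry the same value.
fn constrain_equal(expected: Witness, actual: Witness) -> Opcode {
    let mut eq = Expression::from(expected);
    eq.push_addition_term(-FieldElement::one(), actual);
    Opcode::Arithmetic(eq)
}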
{ - #![proptest_config(ProptestConfig::with_cases(3))] - #[test] - fn test_hash_to_field(input_values in proptest::collection::vec(0..u8::MAX, 1..50)) { - let mut opcodes = Vec::new(); - let mut witness_assignments = BTreeMap::new(); - let mut input_witnesses: Vec = Vec::new(); - - // prepare test data - let mut counter = 0; - let output = hash_to_field_128_security(&input_values).unwrap(); - for inp_v in input_values { - counter += 1; - let function_input = FunctionInput { witness: Witness(counter), num_bits: 8 }; - input_witnesses.push(function_input); - witness_assignments.insert(Witness(counter), FieldElement::from(inp_v as u128)); - } - - counter += 1; - let correct_result_witnesses: Witness = Witness(counter); - witness_assignments.insert(Witness(counter), output); - - counter += 1; - let output_witness: Witness = Witness(counter); - - let blackbox = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::HashToField128Security { inputs: input_witnesses, output: output_witness }); - opcodes.push(blackbox); - - // constrain the output to be the same as the hasher - let mut output_constraint = Expression::from(correct_result_witnesses); - output_constraint.push_addition_term(-FieldElement::one(), output_witness); - opcodes.push(Opcode::Arithmetic(output_constraint)); - - // compile circuit - let circuit = Circuit { - current_witness_index: witness_assignments.len() as u32 + 1, - opcodes, - private_parameters: BTreeSet::new(), // This is not correct but is unused in this test. - ..Circuit::default() - }; - let circuit = compile(circuit, Language::PLONKCSat{ width: 3 }, does_not_support_hash_to_field).unwrap().0; - - // solve witnesses - let mut acvm = ACVM::new(&StubbedBackend, &circuit.opcodes, witness_assignments.into()); - let solver_status = acvm.solve(); - - prop_assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); - } -} diff --git a/acvm-repo/acvm_js/Cargo.toml b/acvm-repo/acvm_js/Cargo.toml index f6054b49de1..22bd6e5aa7e 100644 --- a/acvm-repo/acvm_js/Cargo.toml +++ b/acvm-repo/acvm_js/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_js" description = "Typescript wrapper around the ACVM allowing execution of ACIR code" # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm_js/package.json b/acvm-repo/acvm_js/package.json index a890ca40080..9f265dd676a 100644 --- a/acvm-repo/acvm_js/package.json +++ b/acvm-repo/acvm_js/package.json @@ -1,6 +1,6 @@ { "name": "@noir-lang/acvm_js", - "version": "0.37.0", + "version": "0.38.0", "repository": { "type": "git", "url": "https://github.com/noir-lang/acvm.git" diff --git a/acvm-repo/barretenberg_blackbox_solver/Cargo.toml b/acvm-repo/barretenberg_blackbox_solver/Cargo.toml index bcf02eeab09..01f5e6ca950 100644 --- a/acvm-repo/barretenberg_blackbox_solver/Cargo.toml +++ b/acvm-repo/barretenberg_blackbox_solver/Cargo.toml @@ -2,7 +2,7 @@ name = "barretenberg_blackbox_solver" description = "A wrapper around a barretenberg WASM binary to execute black box functions for which there is no rust implementation" # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/blackbox_solver/Cargo.toml b/acvm-repo/blackbox_solver/Cargo.toml index aaff52c3c04..be2a58417f4 100644 --- a/acvm-repo/blackbox_solver/Cargo.toml +++ b/acvm-repo/blackbox_solver/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_blackbox_solver" description = "A solver 
for the blackbox functions found in ACIR and Brillig" # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/blackbox_solver/src/lib.rs b/acvm-repo/blackbox_solver/src/lib.rs index 13d0f562415..8b7c6343962 100644 --- a/acvm-repo/blackbox_solver/src/lib.rs +++ b/acvm-repo/blackbox_solver/src/lib.rs @@ -16,8 +16,6 @@ use thiserror::Error; #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum BlackBoxResolutionError { - #[error("unsupported blackbox function: {0}")] - Unsupported(BlackBoxFunc), #[error("failed to solve blackbox function: {0}, reason: {1}")] Failed(BlackBoxFunc, String), } diff --git a/acvm-repo/brillig/Cargo.toml b/acvm-repo/brillig/Cargo.toml index 47f9b3f429e..ee8651faeec 100644 --- a/acvm-repo/brillig/Cargo.toml +++ b/acvm-repo/brillig/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig" description = "Brillig is the bytecode ACIR uses for non-determinism." # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/brillig/src/opcodes.rs b/acvm-repo/brillig/src/opcodes.rs index 44d90acde47..79295cc6e5d 100644 --- a/acvm-repo/brillig/src/opcodes.rs +++ b/acvm-repo/brillig/src/opcodes.rs @@ -133,28 +133,6 @@ pub enum BrilligOpcode { Stop, } -impl BrilligOpcode { - pub fn name(&self) -> &'static str { - match self { - BrilligOpcode::BinaryFieldOp { .. } => "binary_field_op", - BrilligOpcode::BinaryIntOp { .. } => "binary_int_op", - BrilligOpcode::JumpIfNot { .. } => "jmp_if_not", - BrilligOpcode::JumpIf { .. } => "jmp_if", - BrilligOpcode::Jump { .. } => "jmp", - BrilligOpcode::Call { .. } => "call", - BrilligOpcode::Const { .. } => "const", - BrilligOpcode::Return => "return", - BrilligOpcode::ForeignCall { .. } => "foreign_call", - BrilligOpcode::Mov { .. } => "mov", - BrilligOpcode::Load { .. } => "load", - BrilligOpcode::Store { .. 
} => "store", - BrilligOpcode::BlackBox(_) => "black_box", - BrilligOpcode::Trap => "trap", - BrilligOpcode::Stop => "stop", - } - } -} - /// Binary fixed-length field expressions #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum BinaryFieldOp { diff --git a/acvm-repo/brillig_vm/Cargo.toml b/acvm-repo/brillig_vm/Cargo.toml index 8c533352dd5..91bef2572bb 100644 --- a/acvm-repo/brillig_vm/Cargo.toml +++ b/acvm-repo/brillig_vm/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig_vm" description = "The virtual machine that processes Brillig bytecode, used to introduce non-determinism to the ACVM" # x-release-please-start-version -version = "0.37.0" +version = "0.38.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/stdlib/CHANGELOG.md b/acvm-repo/stdlib/CHANGELOG.md deleted file mode 100644 index bea80c95d1e..00000000000 --- a/acvm-repo/stdlib/CHANGELOG.md +++ /dev/null @@ -1,350 +0,0 @@ -# Changelog - -## [0.27.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.26.1...acvm_stdlib-v0.27.0) (2023-09-19) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.26.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.26.0...acvm_stdlib-v0.26.1) (2023-09-12) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.26.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.25.0...acvm_stdlib-v0.26.0) (2023-09-07) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.25.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.24.1...acvm_stdlib-v0.25.0) (2023-09-04) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.24.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.24.0...acvm_stdlib-v0.24.1) (2023-09-03) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.24.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.23.0...acvm_stdlib-v0.24.0) (2023-08-31) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.23.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.22.0...acvm_stdlib-v0.23.0) (2023-08-30) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.22.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.21.0...acvm_stdlib-v0.22.0) (2023-08-18) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.21.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.20.1...acvm_stdlib-v0.21.0) (2023-07-26) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.20.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.20.0...acvm_stdlib-v0.20.1) (2023-07-26) - - -### Features - -* add optimisations to fallback black box functions on booleans ([#446](https://github.com/noir-lang/acvm/issues/446)) ([2cfb2a8](https://github.com/noir-lang/acvm/commit/2cfb2a8cf911a81eedbd9da13ab2c616abd67f83)) -* **stdlib:** Add fallback implementation of `Keccak256` black box function ([#445](https://github.com/noir-lang/acvm/issues/445)) ([f7ebb03](https://github.com/noir-lang/acvm/commit/f7ebb03653c971f119700ff8126d9eb5ff01be0f)) - -## [0.20.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.19.1...acvm_stdlib-v0.20.0) (2023-07-20) - - -### Features - -* **stdlib:** Add fallback implementation of `HashToField128Security` black box function ([#435](https://github.com/noir-lang/acvm/issues/435)) 
([ed40f22](https://github.com/noir-lang/acvm/commit/ed40f228529e888d1960bfa70cb92b277e24b37f)) - -## [0.19.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.19.0...acvm_stdlib-v0.19.1) (2023-07-17) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.19.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.18.2...acvm_stdlib-v0.19.0) (2023-07-15) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.18.2](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.18.1...acvm_stdlib-v0.18.2) (2023-07-12) - - -### Features - -* **stdlib:** Add fallback implementation of `Blake2s` black box function ([#424](https://github.com/noir-lang/acvm/issues/424)) ([982d940](https://github.com/noir-lang/acvm/commit/982d94087d46092ce7a5e94dbd7e732195f58e42)) - -## [0.18.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.18.0...acvm_stdlib-v0.18.1) (2023-07-12) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.18.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.17.0...acvm_stdlib-v0.18.0) (2023-07-12) - - -### Features - -* **stdlib:** Add fallback implementation of `SHA256` black box function ([#407](https://github.com/noir-lang/acvm/issues/407)) ([040369a](https://github.com/noir-lang/acvm/commit/040369adc8749fa5ec2edd255ff54c105c3140f5)) - -## [0.17.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.16.0...acvm_stdlib-v0.17.0) (2023-07-07) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.16.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.15.1...acvm_stdlib-v0.16.0) (2023-07-06) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.15.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.15.0...acvm_stdlib-v0.15.1) (2023-06-20) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.15.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.14.2...acvm_stdlib-v0.15.0) (2023-06-15) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.14.2](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.14.1...acvm_stdlib-v0.14.2) (2023-06-08) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.14.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.14.0...acvm_stdlib-v0.14.1) (2023-06-07) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.14.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.13.3...acvm_stdlib-v0.14.0) (2023-06-06) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.13.3](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.13.2...acvm_stdlib-v0.13.3) (2023-06-05) - - -### Bug Fixes - -* Empty commit to trigger release-please ([e8f0748](https://github.com/noir-lang/acvm/commit/e8f0748042ef505d59ab63266d3c36c5358ee30d)) - -## [0.13.2](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.13.1...acvm_stdlib-v0.13.2) (2023-06-02) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.13.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.13.0...acvm_stdlib-v0.13.1) (2023-06-01) - - -### Bug Fixes - -* **ci:** Correct typo to avoid `undefined` in changelogs ([#333](https://github.com/noir-lang/acvm/issues/333)) ([d3424c0](https://github.com/noir-lang/acvm/commit/d3424c04fd303c9cbe25d03118d8b358cbb84b83)) - -## 
[0.13.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.12.0...acvm_stdlib-v0.13.0) (2023-06-01) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.12.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.11.0...acvm_stdlib-v0.12.0) (2023-05-17) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.11.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.10.3...acvm_stdlib-v0.11.0) (2023-05-04) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.10.3](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.10.2...acvm_stdlib-v0.10.3) (2023-04-28) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.10.2](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.10.1...acvm_stdlib-v0.10.2) (2023-04-28) - - -### Bug Fixes - -* add default flag to `acvm_stdlib` ([#242](https://github.com/noir-lang/acvm/issues/242)) ([83b6fa8](https://github.com/noir-lang/acvm/commit/83b6fa8302569add7e3ac8481b2fd2a6a1ff3576)) - -## [0.10.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.10.0...acvm_stdlib-v0.10.1) (2023-04-28) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - -## [0.10.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.9.0...acvm_stdlib-v0.10.0) (2023-04-26) - - -### ⚠ BREAKING CHANGES - -* organise operator implementations for Expression ([#190](https://github.com/noir-lang/acvm/issues/190)) - -### Bug Fixes - -* prevent `bn254` feature flag always being enabled ([#225](https://github.com/noir-lang/acvm/issues/225)) ([82eee6a](https://github.com/noir-lang/acvm/commit/82eee6ab08ae480f04904ca8571fd88f4466c000)) - - -### Miscellaneous Chores - -* organise operator implementations for Expression ([#190](https://github.com/noir-lang/acvm/issues/190)) ([a619df6](https://github.com/noir-lang/acvm/commit/a619df614bbb9b2518b788b42a7553b069823a0f)) - -## [0.9.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.8.1...acvm_stdlib-v0.9.0) (2023-04-07) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.8.1 to 0.9.0 - -## [0.8.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.8.0...acvm_stdlib-v0.8.1) (2023-03-30) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.8.0 to 0.8.1 - -## [0.8.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.7.1...acvm_stdlib-v0.8.0) (2023-03-28) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.7.1 to 0.8.0 - -## [0.7.1](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.7.0...acvm_stdlib-v0.7.1) (2023-03-27) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.7.0 to 0.7.1 - -## [0.7.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.6.0...acvm_stdlib-v0.7.0) (2023-03-23) - - -### Miscellaneous Chores - -* **acvm_stdlib:** Synchronize acvm versions - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 
0.6.0 to 0.7.0 - -## [0.6.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.5.0...acvm_stdlib-v0.6.0) (2023-03-03) - - -### ⚠ BREAKING CHANGES - -* **acir:** rename `term_addition` to `push_addition_term` -* **acir:** rename `term_multiplication` to `push_multiplication_term` ([#122](https://github.com/noir-lang/acvm/issues/122)) - -### Miscellaneous Chores - -* **acir:** rename `term_addition` to `push_addition_term` ([d389385](https://github.com/noir-lang/acvm/commit/d38938542851a97dc01727438391e6a65e44c689)) -* **acir:** rename `term_multiplication` to `push_multiplication_term` ([#122](https://github.com/noir-lang/acvm/issues/122)) ([d389385](https://github.com/noir-lang/acvm/commit/d38938542851a97dc01727438391e6a65e44c689)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.5.0 to 0.6.0 - -## [0.5.0](https://github.com/noir-lang/acvm/compare/acvm_stdlib-v0.4.1...acvm_stdlib-v0.5.0) (2023-02-22) - - -### ⚠ BREAKING CHANGES - -* refactor ToRadix to ToRadixLe and ToRadixBe ([#58](https://github.com/noir-lang/acvm/issues/58)) - -### Miscellaneous Chores - -* refactor ToRadix to ToRadixLe and ToRadixBe ([#58](https://github.com/noir-lang/acvm/issues/58)) ([2427a27](https://github.com/noir-lang/acvm/commit/2427a275048e598c6d651cce8348a4c55148f235)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * acir bumped from 0.4.1 to 0.5.0 diff --git a/acvm-repo/stdlib/Cargo.toml b/acvm-repo/stdlib/Cargo.toml deleted file mode 100644 index de50d112df6..00000000000 --- a/acvm-repo/stdlib/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "acvm_stdlib" -description = "The ACVM standard library." -# x-release-please-start-version -version = "0.37.0" -# x-release-please-end -authors.workspace = true -edition.workspace = true -license.workspace = true -rust-version.workspace = true -repository.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -acir.workspace = true - -[features] -default = ["bn254"] -bn254 = ["acir/bn254"] -bls12_381 = ["acir/bls12_381"] -testing = ["bn254"] diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/blake2s.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/blake2s.rs deleted file mode 100644 index 92bf93d2d56..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/blake2s.rs +++ /dev/null @@ -1,468 +0,0 @@ -//! Blake2s fallback function. 
-use super::{ - utils::{byte_decomposition, round_to_nearest_byte}, - UInt32, -}; -use acir::{ - circuit::Opcode, - native_types::{Expression, Witness}, - FieldElement, -}; -use std::vec; - -const BLAKE2S_BLOCKBYTES_USIZE: usize = 64; -const MSG_SCHEDULE_BLAKE2: [[usize; 16]; 10] = [ - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], - [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], - [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], - [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], - [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], - [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], - [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], - [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], - [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], - [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], -]; -const INITIAL_H: [u32; 8] = [ - 0x6b08e647, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, -]; -const IV_VALUE: [u32; 8] = [ - 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, -]; - -pub fn blake2s( - inputs: Vec<(Expression, u32)>, - outputs: Vec, - mut num_witness: u32, -) -> (u32, Vec) { - let mut new_opcodes = Vec::new(); - let mut new_inputs = Vec::new(); - - // Decompose the input field elements into bytes and collect the resulting witnesses. - for (witness, num_bits) in inputs { - let num_bytes = round_to_nearest_byte(num_bits); - let (extra_opcodes, extra_inputs, updated_witness_counter) = - byte_decomposition(witness, num_bytes, num_witness); - new_opcodes.extend(extra_opcodes); - new_inputs.extend(extra_inputs); - num_witness = updated_witness_counter; - } - - let (result, num_witness, extra_opcodes) = create_blake2s_constraint(new_inputs, num_witness); - new_opcodes.extend(extra_opcodes); - - // constrain the outputs to be the same as the result of the circuit - for i in 0..outputs.len() { - let mut expr = Expression::from(outputs[i]); - expr.push_addition_term(-FieldElement::one(), result[i]); - new_opcodes.push(Opcode::Arithmetic(expr)); - } - (num_witness, new_opcodes) -} - -pub(crate) fn create_blake2s_constraint( - input: Vec, - num_witness: u32, -) -> (Vec, u32, Vec) { - let mut new_opcodes = Vec::new(); - - // prepare constants - let (mut blake2s_state, extra_opcodes, num_witness) = Blake2sState::init(num_witness); - new_opcodes.extend(extra_opcodes); - let (blake2s_constants, extra_opcodes, num_witness) = - Blake2sConstantsInCircuit::init(num_witness); - new_opcodes.extend(extra_opcodes); - let (blake2s_iv, extra_opcodes, mut num_witness) = Blake2sIV::init(num_witness); - new_opcodes.extend(extra_opcodes); - - let mut offset = 0; - let mut size = input.len(); - - while size > BLAKE2S_BLOCKBYTES_USIZE { - let (extra_opcodes, updated_witness_counter) = blake2s_increment_counter( - &mut blake2s_state, - &blake2s_constants.blake2s_blockbytes_uint32, - num_witness, - ); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, updated_witness_counter) = blake2s_compress( - &mut blake2s_state, - &blake2s_iv, - input.get(offset..offset + BLAKE2S_BLOCKBYTES_USIZE).unwrap(), - updated_witness_counter, - ); - new_opcodes.extend(extra_opcodes); - offset += BLAKE2S_BLOCKBYTES_USIZE; - size -= BLAKE2S_BLOCKBYTES_USIZE; - num_witness = updated_witness_counter; - } - - let (u32_max, extra_opcodes, mut num_witness) = UInt32::load_constant(u32::MAX, num_witness); - new_opcodes.extend(extra_opcodes); - blake2s_state.f[0] = u32_max; - - // pad final 
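// Side note on the constants above: INITIAL_H differs from IV_VALUE only in the first
// word, which is IV[0] xored with the BLAKE2s parameter-block word for an unkeyed
// 32-byte digest (digest_length = 0x20, key_length = 0, fanout = depth = 1). A quick
// illustrative check:
#[test]
fn initial_h_encodes_parameter_block() {
    assert_eq!(0x6A09_E667_u32 ^ 0x0101_0020, 0x6b08_e647);
}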
block - let mut final_block = input.get(offset..).unwrap().to_vec(); - for _ in 0..BLAKE2S_BLOCKBYTES_USIZE - final_block.len() { - let (pad, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(0_u32, num_witness); - new_opcodes.extend(extra_opcodes); - final_block.push(pad.inner); - num_witness = updated_witness_counter; - } - - let (size_w, extra_opcodes, num_witness) = UInt32::load_constant(size as u32, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - blake2s_increment_counter(&mut blake2s_state, &size_w, num_witness); - new_opcodes.extend(extra_opcodes); - - let (extra_opcodes, num_witness) = - blake2s_compress(&mut blake2s_state, &blake2s_iv, &final_block, num_witness); - new_opcodes.extend(extra_opcodes); - - // decompose the result bytes in u32 to u8 - let (extra_opcodes, mut byte1, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[0].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte2, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[1].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte3, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[2].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte4, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[3].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte5, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[4].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte6, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[5].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte7, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[6].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut byte8, num_witness) = - byte_decomposition(Expression::from(blake2s_state.h[7].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - - byte1.reverse(); - byte2.reverse(); - byte3.reverse(); - byte4.reverse(); - byte5.reverse(); - byte6.reverse(); - byte7.reverse(); - byte8.reverse(); - - let result = vec![byte1, byte2, byte3, byte4, byte5, byte6, byte7, byte8] - .into_iter() - .flatten() - .collect(); - - (result, num_witness, new_opcodes) -} - -fn blake2s_increment_counter( - state: &mut Blake2sState, - inc: &UInt32, - num_witness: u32, -) -> (Vec, u32) { - let mut new_opcodes = Vec::new(); - - // t0 + inc - let (state_t0, extra_opcodes, num_witness) = state.t[0].add(inc, num_witness); - new_opcodes.extend(extra_opcodes); - state.t[0] = state_t0; - - // t1 + (t0 < inc) - let (to_inc, extra_opcodes, num_witness) = state.t[0].less_than_comparison(inc, num_witness); - new_opcodes.extend(extra_opcodes); - let (state_t1, extra_opcodes, num_witness) = state.t[1].add(&to_inc, num_witness); - new_opcodes.extend(extra_opcodes); - state.t[1] = state_t1; - - (new_opcodes, num_witness) -} - -fn blake2s_compress( - state: &mut Blake2sState, - blake2s_iv: &Blake2sIV, - input: &[Witness], - mut num_witness: u32, -) -> (Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut m = Vec::new(); - let mut v = Vec::new(); - - for i in 0..16 { - let mut mi_bytes = input.get(i * 4..i * 4 + 4).unwrap().to_vec(); - mi_bytes.reverse(); - let (mi, extra_opcodes, updated_witness_counter) = - UInt32::from_witnesses(&mi_bytes, 
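// Native u32 analogue of blake2s_increment_counter above (an illustrative sketch,
// assuming the usual BLAKE2s 64-bit byte counter split across two 32-bit words):
fn increment_counter_native(t: &mut [u32; 2], inc: u32) {
    let (low, carried) = t[0].overflowing_add(inc);
    t[0] = low;
    // the circuit adds (t[0] < inc) to t[1]; after a wrapping add that comparison
    // is exactly the carry bit
    t[1] = t[1].wrapping_add(carried as u32);
}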
num_witness); - new_opcodes.extend(extra_opcodes); - m.push(mi[0]); - num_witness = updated_witness_counter; - } - - for i in 0..8 { - v.push(state.h[i]); - } - - v.push(blake2s_iv.iv[0]); - v.push(blake2s_iv.iv[1]); - v.push(blake2s_iv.iv[2]); - v.push(blake2s_iv.iv[3]); - let (v12, extra_opcodes, num_witness) = state.t[0].xor(&blake2s_iv.iv[4], num_witness); - new_opcodes.extend(extra_opcodes); - v.push(v12); - let (v13, extra_opcodes, num_witness) = state.t[1].xor(&blake2s_iv.iv[5], num_witness); - new_opcodes.extend(extra_opcodes); - v.push(v13); - let (v14, extra_opcodes, num_witness) = state.f[0].xor(&blake2s_iv.iv[6], num_witness); - new_opcodes.extend(extra_opcodes); - v.push(v14); - let (v15, extra_opcodes, num_witness) = state.f[1].xor(&blake2s_iv.iv[7], num_witness); - new_opcodes.extend(extra_opcodes); - v.push(v15); - - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 0, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 1, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 2, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 3, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 5, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 6, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 7, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = blake2s_round(&mut v, &m, 8, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, mut num_witness) = blake2s_round(&mut v, &m, 9, num_witness); - new_opcodes.extend(extra_opcodes); - - for i in 0..8 { - let (a, extra_opcodes, updated_witness_counter) = state.h[i].xor(&v[i], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_hi, extra_opcodes, updated_witness_counter) = - a.xor(&v[i + 8], updated_witness_counter); - new_opcodes.extend(extra_opcodes); - state.h[i] = state_hi; - num_witness = updated_witness_counter; - } - - (new_opcodes, num_witness) -} - -fn blake2s_round( - state: &mut [UInt32], - msg: &[UInt32], - round: usize, - num_witness: u32, -) -> (Vec, u32) { - let mut new_opcodes = Vec::new(); - let schedule = &MSG_SCHEDULE_BLAKE2[round]; - - // Mix the columns. - let (extra_opcodes, num_witness) = - g(state, 0, 4, 8, 12, msg[schedule[0]], msg[schedule[1]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 1, 5, 9, 13, msg[schedule[2]], msg[schedule[3]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 2, 6, 10, 14, msg[schedule[4]], msg[schedule[5]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 3, 7, 11, 15, msg[schedule[6]], msg[schedule[7]], num_witness); - new_opcodes.extend(extra_opcodes); - - // Mix the rows. 
- let (extra_opcodes, num_witness) = - g(state, 0, 5, 10, 15, msg[schedule[8]], msg[schedule[9]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 1, 6, 11, 12, msg[schedule[10]], msg[schedule[11]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 2, 7, 8, 13, msg[schedule[12]], msg[schedule[13]], num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, num_witness) = - g(state, 3, 4, 9, 14, msg[schedule[14]], msg[schedule[15]], num_witness); - new_opcodes.extend(extra_opcodes); - - (new_opcodes, num_witness) -} - -#[allow(clippy::too_many_arguments)] -fn g( - state: &mut [UInt32], - a: usize, - b: usize, - c: usize, - d: usize, - x: UInt32, - y: UInt32, - num_witness: u32, -) -> (Vec, u32) { - let mut new_opcodes = Vec::new(); - - // calculate state[a] as `state[a] + state[b] + x` - let (state_a_1, extra_opcodes, num_witness) = state[a].add(&state[b], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_a, extra_opcodes, num_witness) = state_a_1.add(&x, num_witness); - new_opcodes.extend(extra_opcodes); - state[a] = state_a; - - // calculate state[d] as `(state[d] ^ state[a]).ror(16)` - let (state_d_1, extra_opcodes, num_witness) = state[d].xor(&state[a], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_d, extra_opcodes, num_witness) = state_d_1.ror(16, num_witness); - new_opcodes.extend(extra_opcodes); - state[d] = state_d; - - // calculate state[c] as `state[c] + state[d]` - let (state_c, extra_opcodes, num_witness) = state[c].add(&state[d], num_witness); - new_opcodes.extend(extra_opcodes); - state[c] = state_c; - - // caclulate state[b] as `(state[b] ^ state[c]).ror(12)` - let (state_b_1, extra_opcodes, num_witness) = state[b].xor(&state[c], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_b, extra_opcodes, num_witness) = state_b_1.ror(12, num_witness); - new_opcodes.extend(extra_opcodes); - state[b] = state_b; - - // calculate state[a] as `state[a] + state[b] + y` - let (state_a_1, extra_opcodes, num_witness) = state[a].add(&state[b], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_a, extra_opcodes, num_witness) = state_a_1.add(&y, num_witness); - new_opcodes.extend(extra_opcodes); - state[a] = state_a; - - // calculate state[d] as `(state[d] ^ state[a]).ror(8)` - let (state_d_1, extra_opcodes, num_witness) = state[d].xor(&state[a], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_d, extra_opcodes, num_witness) = state_d_1.ror(8, num_witness); - new_opcodes.extend(extra_opcodes); - state[d] = state_d; - - // calculate state[c] as `state[c] + state[d]` - let (state_c, extra_opcodes, num_witness) = state[c].add(&state[d], num_witness); - new_opcodes.extend(extra_opcodes); - state[c] = state_c; - - // caclulate state[b] as `(state[b] ^ state[c]).ror(7)` - let (state_b_1, extra_opcodes, num_witness) = state[b].xor(&state[c], num_witness); - new_opcodes.extend(extra_opcodes); - let (state_b, extra_opcodes, num_witness) = state_b_1.ror(7, num_witness); - new_opcodes.extend(extra_opcodes); - state[b] = state_b; - - (new_opcodes, num_witness) -} - -/// Blake2s state `h` `t` and `f` -#[derive(Debug)] -struct Blake2sState { - h: Vec, - t: Vec, - f: Vec, -} - -impl Blake2sState { - fn new(h: Vec, t: Vec, f: Vec) -> Self { - Blake2sState { h, t, f } - } - - /// Initialize internal state of Blake2s - fn init(mut num_witness: u32) -> (Blake2sState, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut h = 
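// Native u32 analogue of the in-circuit g() above (sketch only): the same mixing
// structure and the same rotation amounts (16, 12, 8, 7) as the standard BLAKE2s
// G function.
fn g_native(v: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize, x: u32, y: u32) {
    v[a] = v[a].wrapping_add(v[b]).wrapping_add(x);
    v[d] = (v[d] ^ v[a]).rotate_right(16);
    v[c] = v[c].wrapping_add(v[d]);
    v[b] = (v[b] ^ v[c]).rotate_right(12);
    v[a] = v[a].wrapping_add(v[b]).wrapping_add(y);
    v[d] = (v[d] ^ v[a]).rotate_right(8);
    v[c] = v[c].wrapping_add(v[d]);
    v[b] = (v[b] ^ v[c]).rotate_right(7);
}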
Vec::new(); - let mut t = Vec::new(); - let mut f = Vec::new(); - - for init_h in INITIAL_H { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(init_h, num_witness); - new_opcodes.extend(extra_opcodes); - h.push(new_witness); - num_witness = updated_witness_counter; - } - - for _ in 0..2 { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(0_u32, num_witness); - new_opcodes.extend(extra_opcodes); - t.push(new_witness); - num_witness = updated_witness_counter; - } - - for _ in 0..2 { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(0_u32, num_witness); - new_opcodes.extend(extra_opcodes); - f.push(new_witness); - num_witness = updated_witness_counter; - } - - let blake2s_state = Blake2sState::new(h, t, f); - - (blake2s_state, new_opcodes, num_witness) - } -} - -/// Blake2s IV (Initialization Vector) -struct Blake2sIV { - iv: Vec, -} - -impl Blake2sIV { - fn new(iv: Vec) -> Self { - Blake2sIV { iv } - } - - /// Initialize IV of Blake2s - fn init(mut num_witness: u32) -> (Blake2sIV, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut iv = Vec::new(); - - for iv_v in IV_VALUE { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(iv_v, num_witness); - new_opcodes.extend(extra_opcodes); - iv.push(new_witness); - num_witness = updated_witness_counter; - } - - let blake2s_iv = Blake2sIV::new(iv); - - (blake2s_iv, new_opcodes, num_witness) - } -} - -struct Blake2sConstantsInCircuit { - blake2s_blockbytes_uint32: UInt32, -} - -impl Blake2sConstantsInCircuit { - fn new(blake2s_blockbytes_uint32: UInt32) -> Self { - Blake2sConstantsInCircuit { blake2s_blockbytes_uint32 } - } - - fn init(num_witness: u32) -> (Blake2sConstantsInCircuit, Vec, u32) { - let mut new_opcodes = Vec::new(); - let (blake2s_blockbytes_uint32, extra_opcodes, num_witness) = - UInt32::load_constant(64_u32, num_witness); - new_opcodes.extend(extra_opcodes); - - (Blake2sConstantsInCircuit::new(blake2s_blockbytes_uint32), new_opcodes, num_witness) - } -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/hash_to_field.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/hash_to_field.rs deleted file mode 100644 index 91a7cdd09e4..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/hash_to_field.rs +++ /dev/null @@ -1,168 +0,0 @@ -//! HashToField128Security fallback function. -use super::{ - blake2s::create_blake2s_constraint, - utils::{byte_decomposition, round_to_nearest_byte}, - UInt32, -}; -use crate::helpers::VariableStore; -use acir::{ - brillig::{self, RegisterIndex}, - circuit::{ - brillig::{Brillig, BrilligInputs, BrilligOutputs}, - Opcode, - }, - native_types::{Expression, Witness}, - FieldElement, -}; - -pub fn hash_to_field( - inputs: Vec<(Expression, u32)>, - outputs: Witness, - mut num_witness: u32, -) -> (u32, Vec) { - let mut new_opcodes = Vec::new(); - let mut new_inputs = Vec::new(); - - // Decompose the input field elements into bytes and collect the resulting witnesses. 
- for (witness, num_bits) in inputs { - let num_bytes = round_to_nearest_byte(num_bits); - let (extra_opcodes, extra_inputs, updated_witness_counter) = - byte_decomposition(witness, num_bytes, num_witness); - new_opcodes.extend(extra_opcodes); - new_inputs.extend(extra_inputs); - num_witness = updated_witness_counter; - } - - let (result, num_witness, extra_opcodes) = create_blake2s_constraint(new_inputs, num_witness); - new_opcodes.extend(extra_opcodes); - - // transform bytes to a single field - let (result, extra_opcodes, num_witness) = field_from_be_bytes(&result, num_witness); - new_opcodes.extend(extra_opcodes); - - // constrain the outputs to be the same as the result of the circuit - let mut expr = Expression::from(outputs); - expr.push_addition_term(-FieldElement::one(), result); - new_opcodes.push(Opcode::Arithmetic(expr)); - (num_witness, new_opcodes) -} - -/// Convert bytes represented by [Witness]es to a single [FieldElement] -fn field_from_be_bytes(result: &[Witness], num_witness: u32) -> (Witness, Vec, u32) { - let mut new_opcodes = Vec::new(); - - // Load `0` and `256` using the load constant function from UInt32 - let (new_witness, extra_opcodes, num_witness) = UInt32::load_constant(0, num_witness); - let mut new_witness = new_witness.inner; - new_opcodes.extend(extra_opcodes); - let (const_256, extra_opcodes, mut num_witness) = UInt32::load_constant(256, num_witness); - let const_256 = const_256.inner; - new_opcodes.extend(extra_opcodes); - - // add byte and multiply 256 each round - for r in result.iter().take(result.len() - 1) { - let (updated_witness, extra_opcodes, updated_witness_counter) = - field_addition(&new_witness, r, num_witness); - new_opcodes.extend(extra_opcodes); - let (updated_witness, extra_opcodes, updated_witness_counter) = - field_mul(&updated_witness, &const_256, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - new_witness = updated_witness; - num_witness = updated_witness_counter; - } - - let (new_witness, extra_opcodes, num_witness) = - field_addition(&new_witness, &result[result.len() - 1], num_witness); - new_opcodes.extend(extra_opcodes); - - (new_witness, new_opcodes, num_witness) -} - -/// Caculate and constrain `self` + `rhs` as field -fn field_addition( - lhs: &Witness, - rhs: &Witness, - mut num_witness: u32, -) -> (Witness, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calculate `self` + `rhs` as field - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), *lhs)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), *rhs)], - q_c: FieldElement::zero(), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::BinaryFieldOp { - op: brillig::BinaryFieldOp::Add, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain addition - let mut add_expr = Expression::from(new_witness); - add_expr.push_addition_term(-FieldElement::one(), *lhs); - add_expr.push_addition_term(-FieldElement::one(), *rhs); - new_opcodes.push(Opcode::Arithmetic(add_expr)); - - (new_witness, new_opcodes, num_witness) -} - -/// 
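// Native sketch of field_from_be_bytes above: interpreting the digest bytes as a
// big-endian integer reduces to Horner's rule over base 256. FieldElement is the acir
// field type; this assumes its usual Add/Mul impls and is illustrative, not repo code.
fn field_from_be_bytes_native(bytes: &[u8]) -> FieldElement {
    let base = FieldElement::from(256_u128);
    bytes.iter().fold(FieldElement::zero(), |acc, byte| {
        acc * base + FieldElement::from(*byte as u128)
    })
}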
Calculate and constrain `self` * `rhs` as field -pub(crate) fn field_mul( - lhs: &Witness, - rhs: &Witness, - mut num_witness: u32, -) -> (Witness, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calulate `self` * `rhs` with overflow - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), *lhs)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), *rhs)], - q_c: FieldElement::zero(), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::BinaryFieldOp { - op: brillig::BinaryFieldOp::Mul, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain mul - let mut mul_constraint = Expression::from(new_witness); - mul_constraint.push_multiplication_term(-FieldElement::one(), *lhs, *rhs); - new_opcodes.push(Opcode::Arithmetic(mul_constraint)); - - (new_witness, new_opcodes, num_witness) -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/keccak256.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/keccak256.rs deleted file mode 100644 index d91db3dc2c6..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/keccak256.rs +++ /dev/null @@ -1,269 +0,0 @@ -//! Keccak256 fallback function. -use super::{ - sha256::pad, - uint8::UInt8, - utils::{byte_decomposition, round_to_nearest_byte}, - UInt64, -}; -use acir::{ - circuit::Opcode, - native_types::{Expression, Witness}, - FieldElement, -}; - -const STATE_NUM_BYTES: usize = 200; -const BITS: usize = 256; -const WORD_SIZE: usize = 8; -const BLOCK_SIZE: usize = (1600 - BITS * 2) / WORD_SIZE; -const ROUND_CONSTANTS: [u64; 24] = [ - 1, - 0x8082, - 0x800000000000808a, - 0x8000000080008000, - 0x808b, - 0x80000001, - 0x8000000080008081, - 0x8000000000008009, - 0x8a, - 0x88, - 0x80008009, - 0x8000000a, - 0x8000808b, - 0x800000000000008b, - 0x8000000000008089, - 0x8000000000008003, - 0x8000000000008002, - 0x8000000000000080, - 0x800a, - 0x800000008000000a, - 0x8000000080008081, - 0x8000000000008080, - 0x80000001, - 0x8000000080008008, -]; -const RHO: [u32; 24] = - [1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44]; -const PI: [usize; 24] = - [10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1]; - -pub fn keccak256( - inputs: Vec<(Expression, u32)>, - outputs: Vec, - mut num_witness: u32, -) -> (u32, Vec) { - let mut new_opcodes = Vec::new(); - let mut new_inputs = Vec::new(); - - // Decompose the input field elements into bytes and collect the resulting witnesses. 
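// Quick sanity check of the constants above: with BITS = 256 the sponge rate is
// (1600 - 2 * 256) / 8 = 136 bytes, which is what BLOCK_SIZE works out to.
#[test]
fn keccak256_rate_is_136_bytes() {
    assert_eq!((1600 - 2 * 256) / 8, 136);
}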
- for (witness, num_bits) in inputs { - let num_bytes = round_to_nearest_byte(num_bits); - let (extra_opcodes, extra_inputs, updated_witness_counter) = - byte_decomposition(witness, num_bytes, num_witness); - new_opcodes.extend(extra_opcodes); - new_inputs.extend(extra_inputs); - num_witness = updated_witness_counter; - } - - let (result, num_witness, extra_opcodes) = create_keccak_constraint(new_inputs, num_witness); - new_opcodes.extend(extra_opcodes); - - // constrain the outputs to be the same as the result of the circuit - for i in 0..outputs.len() { - let mut expr = Expression::from(outputs[i]); - expr.push_addition_term(-FieldElement::one(), result[i]); - new_opcodes.push(Opcode::Arithmetic(expr)); - } - (num_witness, new_opcodes) -} - -fn create_keccak_constraint( - input: Vec, - num_witness: u32, -) -> (Vec, u32, Vec) { - let mut new_opcodes = Vec::new(); - let num_blocks = input.len() / BLOCK_SIZE + 1; - - // pad keccak - let (input, extra_opcodes, mut num_witness) = pad_keccak(input, num_blocks, num_witness); - new_opcodes.extend(extra_opcodes); - - // prepare state - let mut state = Vec::with_capacity(200); - for _ in 0..STATE_NUM_BYTES { - let (zero, extra_opcodes, updated_witness_counter) = UInt8::load_constant(0, num_witness); - new_opcodes.extend(extra_opcodes); - state.push(zero); - num_witness = updated_witness_counter; - } - - // process block - for i in 0..num_blocks { - for j in 0..BLOCK_SIZE { - let (new_state, extra_opcodes, updated_witness_counter) = - state[j].xor(&UInt8::new(input[i * BLOCK_SIZE + j]), num_witness); - new_opcodes.extend(extra_opcodes); - state[j] = new_state; - num_witness = updated_witness_counter; - } - let (new_state, extra_opcodes, updated_witness_counter) = keccakf(state, num_witness); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - state = new_state; - } - - let result: Vec = state[..32].iter().map(|x| x.inner).collect(); - (result, num_witness, new_opcodes) -} - -fn keccakf(state: Vec, num_witness: u32) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - - // turn state into UInt64 - let mut state_witnesses: Vec = Vec::new(); - for i in 0..state.len() / 8 { - for j in 0..8 { - state_witnesses.push(state[i * 8 + (7 - j)].inner); - } - } - let (mut state_u64, extra_opcodes, mut num_witness) = - UInt64::from_witnesses(&state_witnesses, num_witness); - new_opcodes.extend(extra_opcodes); - - // process round - for round_constant in ROUND_CONSTANTS { - let (new_state_u64, extra_opcodes, updated_witness_counter) = - keccak_round(state_u64, round_constant, num_witness); - state_u64 = new_state_u64; - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - } - - // turn state back to UInt8 - let state_u64_witnesses: Vec = state_u64.into_iter().map(|x| x.inner).collect(); - let mut state_u8 = Vec::with_capacity(state_u64_witnesses.len()); - for state_u64_witness in state_u64_witnesses { - let (extra_opcodes, mut u8s, updated_witness_counter) = - byte_decomposition(Expression::from(state_u64_witness), 8, num_witness); - new_opcodes.extend(extra_opcodes); - u8s.reverse(); - state_u8.push(u8s); - num_witness = updated_witness_counter; - } - - let state_u8: Vec = state_u8.into_iter().flatten().map(UInt8::new).collect(); - (state_u8, new_opcodes, num_witness) -} - -fn keccak_round( - mut a: Vec, - round_const: u64, - mut num_witness: u32, -) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - - // theta - let mut array = Vec::with_capacity(5); - for _ in 0..5 { - let (zero, extra_opcodes, 
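// Native sketch of the padding applied at the start of create_keccak_constraint
// (pad_keccak, defined further below): append a 0x01 domain byte, zero-fill to a whole
// number of rate-sized blocks, then xor 0x80 into the final byte (the original Keccak
// pad10*1, not the 0x06 domain byte of NIST SHA3-256). Illustrative only.
fn pad_keccak_native(mut input: Vec<u8>, rate: usize) -> Vec<u8> {
    input.push(0x01);
    while input.len() % rate != 0 {
        input.push(0x00);
    }
    let last = input.len() - 1;
    input[last] ^= 0x80;
    input
}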
updated_witness_counter) = UInt64::load_constant(0, num_witness); - array.push(zero); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - } - for x in 0..5 { - for y_count in 0..5 { - let y = y_count * 5; - let (new_array_ele, extra_opcodes, updated_witness_counter) = - array[x].xor(&a[x + y], num_witness); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - array[x] = new_array_ele; - } - } - for x in 0..5 { - for y_count in 0..5 { - let y = y_count * 5; - let (a_ele, extra_opcodes, updated_witness_counter) = - array[(x + 1) % 5].rol(1, num_witness); - new_opcodes.extend(extra_opcodes); - let (b_ele, extra_opcodes, updated_witness_counter) = - array[(x + 4) % 5].xor(&a_ele, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (new_array_ele, extra_opcodes, updated_witness_counter) = - a[x + y].xor(&b_ele, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - a[x + y] = new_array_ele; - } - } - - // rho and pi - let mut last = a[1]; - for x in 0..24 { - array[0] = a[PI[x]]; - let (a_ele, extra_opcodes, updated_witness_counter) = last.rol(RHO[x], num_witness); - new_opcodes.extend(extra_opcodes); - a[PI[x]] = a_ele; - num_witness = updated_witness_counter; - last = array[0]; - } - - // chi - for y_step in 0..5 { - let y = y_step * 5; - - array[..5].copy_from_slice(&a[y..(5 + y)]); - - for x in 0..5 { - let (a_ele, extra_opcodes, updated_witness_counter) = - array[(x + 1) % 5].not(num_witness); - new_opcodes.extend(extra_opcodes); - let (b_ele, extra_opcodes, updated_witness_counter) = - a_ele.and(&array[(x + 2) % 5], updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (c_ele, extra_opcodes, updated_witness_counter) = - array[x].xor(&b_ele, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - a[y + x] = c_ele; - num_witness = updated_witness_counter; - } - } - - // iota - let (rc, extra_opcodes, num_witness) = UInt64::load_constant(round_const, num_witness); - new_opcodes.extend(extra_opcodes); - let (a_ele, extra_opcodes, num_witness) = a[0].xor(&rc, num_witness); - new_opcodes.extend(extra_opcodes); - a[0] = a_ele; - - (a, new_opcodes, num_witness) -} - -fn pad_keccak( - mut input: Vec, - num_blocks: usize, - num_witness: u32, -) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let total_len = BLOCK_SIZE * num_blocks; - - let (mut num_witness, pad_witness, extra_opcodes) = pad(0x01, 8, num_witness); - - new_opcodes.extend(extra_opcodes); - input.push(pad_witness); - for _ in 0..total_len - input.len() { - let (updated_witness_counter, pad_witness, extra_opcodes) = pad(0x00, 8, num_witness); - new_opcodes.extend(extra_opcodes); - input.push(pad_witness); - num_witness = updated_witness_counter; - } - - let (zero_x_80, extra_opcodes, num_witness) = UInt8::load_constant(0x80, num_witness); - new_opcodes.extend(extra_opcodes); - let (final_pad, extra_opcodes, num_witness) = - UInt8::new(input[total_len - 1]).xor(&zero_x_80, num_witness); - new_opcodes.extend(extra_opcodes); - input[total_len - 1] = final_pad.inner; - - (input, new_opcodes, num_witness) -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/logic_fallbacks.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/logic_fallbacks.rs deleted file mode 100644 index fa8c1060a26..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/logic_fallbacks.rs +++ /dev/null @@ -1,127 +0,0 @@ -use crate::{blackbox_fallbacks::utils::mul_with_witness, helpers::VariableStore}; - -use 
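// Native u64 analogue of keccak_round above, using the same RHO, PI and round-constant
// tables; a compact reference sketch for the theta / rho-pi / chi / iota steps.
fn keccak_round_native(a: &mut [u64; 25], round_const: u64) {
    // theta
    let mut c = [0u64; 5];
    for x in 0..5 {
        for y in 0..5 {
            c[x] ^= a[x + 5 * y];
        }
    }
    for x in 0..5 {
        for y in 0..5 {
            a[x + 5 * y] ^= c[(x + 4) % 5] ^ c[(x + 1) % 5].rotate_left(1);
        }
    }
    // rho and pi
    let mut last = a[1];
    for x in 0..24 {
        let next = a[PI[x]];
        a[PI[x]] = last.rotate_left(RHO[x]);
        last = next;
    }
    // chi
    for y in 0..5 {
        let row: [u64; 5] = core::array::from_fn(|x| a[5 * y + x]);
        for x in 0..5 {
            a[5 * y + x] = row[x] ^ (!row[(x + 1) % 5] & row[(x + 2) % 5]);
        }
    }
    // iota
    a[0] ^= round_const;
}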
super::utils::{bit_decomposition, boolean_expr}; -use acir::{ - acir_field::FieldElement, - circuit::Opcode, - native_types::{Expression, Witness}, -}; - -// Range constraint -pub fn range(opcode: Expression, bit_size: u32, mut num_witness: u32) -> (u32, Vec) { - if bit_size == 1 { - let mut variables = VariableStore::new(&mut num_witness); - let bit_constraint = Opcode::Arithmetic(boolean_expr(&opcode, &mut variables)); - return (variables.finalize(), vec![bit_constraint]); - } - - let (new_opcodes, _, updated_witness_counter) = - bit_decomposition(opcode, bit_size, num_witness); - (updated_witness_counter, new_opcodes) -} - -/// Returns a set of opcodes which constrain `a & b == result` -/// -/// `a` and `b` are assumed to be constrained to fit within `bit_size` externally. -pub fn and( - a: Expression, - b: Expression, - result: Witness, - bit_size: u32, - mut num_witness: u32, -) -> (u32, Vec) { - if bit_size == 1 { - let mut variables = VariableStore::new(&mut num_witness); - - let mut and_expr = mul_with_witness(&a, &b, &mut variables); - and_expr.push_addition_term(-FieldElement::one(), result); - - return (variables.finalize(), vec![Opcode::Arithmetic(and_expr)]); - } - // Decompose the operands into bits - // - let (extra_opcodes_a, a_bits, updated_witness_counter) = - bit_decomposition(a, bit_size, num_witness); - - let (extra_opcodes_b, b_bits, updated_witness_counter) = - bit_decomposition(b, bit_size, updated_witness_counter); - - assert_eq!(a_bits.len(), b_bits.len()); - assert_eq!(a_bits.len(), bit_size as usize); - - let mut two_pow = FieldElement::one(); - let two = FieldElement::from(2_i128); - - // Build an expression that Multiplies each bit element-wise - // This gives the same truth table as the AND operation - // Additionally, we multiply by a power of 2 to build up the - // expected output; ie result = \sum 2^i x_i * y_i - let mut and_expr = Expression::default(); - for (a_bit, b_bit) in a_bits.into_iter().zip(b_bits) { - and_expr.push_multiplication_term(two_pow, a_bit, b_bit); - two_pow = two * two_pow; - } - and_expr.push_addition_term(-FieldElement::one(), result); - - and_expr.sort(); - - let mut new_opcodes = Vec::new(); - new_opcodes.extend(extra_opcodes_a); - new_opcodes.extend(extra_opcodes_b); - new_opcodes.push(Opcode::Arithmetic(and_expr)); - - (updated_witness_counter, new_opcodes) -} - -/// Returns a set of opcodes which constrain `a ^ b == result` -/// -/// `a` and `b` are assumed to be constrained to fit within `bit_size` externally. 
-pub fn xor( - a: Expression, - b: Expression, - result: Witness, - bit_size: u32, - mut num_witness: u32, -) -> (u32, Vec) { - if bit_size == 1 { - let mut variables = VariableStore::new(&mut num_witness); - - let product = mul_with_witness(&a, &b, &mut variables); - let mut xor_expr = &(&a + &b) - &product; - xor_expr.push_addition_term(-FieldElement::one(), result); - - return (variables.finalize(), vec![Opcode::Arithmetic(xor_expr)]); - } - - // Decompose the operands into bits - // - let (extra_opcodes_a, a_bits, updated_witness_counter) = - bit_decomposition(a, bit_size, num_witness); - let (extra_opcodes_b, b_bits, updated_witness_counter) = - bit_decomposition(b, bit_size, updated_witness_counter); - - assert_eq!(a_bits.len(), b_bits.len()); - assert_eq!(a_bits.len(), bit_size as usize); - - let mut two_pow = FieldElement::one(); - let two = FieldElement::from(2_i128); - - // Build an xor expression - // TODO: check this is the correct arithmetization - let mut xor_expr = Expression::default(); - for (a_bit, b_bit) in a_bits.into_iter().zip(b_bits) { - xor_expr.push_addition_term(two_pow, a_bit); - xor_expr.push_addition_term(two_pow, b_bit); - two_pow = two * two_pow; - xor_expr.push_multiplication_term(-two_pow, a_bit, b_bit); - } - xor_expr.push_addition_term(-FieldElement::one(), result); - - xor_expr.sort(); - let mut new_opcodes = Vec::new(); - new_opcodes.extend(extra_opcodes_a); - new_opcodes.extend(extra_opcodes_b); - new_opcodes.push(Opcode::Arithmetic(xor_expr)); - - (updated_witness_counter, new_opcodes) -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/mod.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/mod.rs deleted file mode 100644 index d2ca3c50fa7..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -mod blake2s; -mod hash_to_field; -mod keccak256; -mod logic_fallbacks; -mod sha256; -#[macro_use] -mod uint; -mod uint32; -mod uint64; -mod uint8; -mod utils; -pub use blake2s::blake2s; -pub use hash_to_field::hash_to_field; -pub use keccak256::keccak256; -pub use logic_fallbacks::{and, range, xor}; -pub use sha256::sha256; -pub use uint32::UInt32; -pub use uint64::UInt64; -pub use uint8::UInt8; diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/sha256.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/sha256.rs deleted file mode 100644 index 1661b030bcc..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/sha256.rs +++ /dev/null @@ -1,377 +0,0 @@ -//! Sha256 fallback function. 
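// Per-bit identity behind the xor arithmetization above (the AND form is analogous,
// using 2^i * a_i * b_i): for bits a_i, b_i in {0, 1}, a_i ^ b_i = a_i + b_i - 2*a_i*b_i.
// A quick native check; names here are illustrative.
fn xor_via_bits(a: u32, b: u32, bit_size: u32) -> u32 {
    (0..bit_size)
        .map(|i| {
            let (ai, bi) = ((a >> i) & 1, (b >> i) & 1);
            (ai + bi - 2 * ai * bi) << i
        })
        .sum()
}

#[test]
fn xor_identity_matches_bitwise_xor() {
    assert_eq!(xor_via_bits(0b1100, 0b1010, 4), 0b1100 ^ 0b1010);
}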
-use super::uint32::UInt32; -use super::utils::{byte_decomposition, round_to_nearest_byte}; -use crate::helpers::VariableStore; -use acir::{ - brillig, - circuit::{ - brillig::{Brillig, BrilligInputs, BrilligOutputs}, - opcodes::{BlackBoxFuncCall, FunctionInput}, - Opcode, - }, - native_types::{Expression, Witness}, - FieldElement, -}; - -const INIT_CONSTANTS: [u32; 8] = [ - 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, -]; - -const ROUND_CONSTANTS: [u32; 64] = [ - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, -]; - -pub fn sha256( - inputs: Vec<(Expression, u32)>, - outputs: Vec, - mut num_witness: u32, -) -> (u32, Vec) { - let mut new_opcodes = Vec::new(); - let mut new_inputs = Vec::new(); - let mut total_num_bytes = 0; - - // Decompose the input field elements into bytes and collect the resulting witnesses. - for (witness, num_bits) in inputs { - let num_bytes = round_to_nearest_byte(num_bits); - total_num_bytes += num_bytes; - let (extra_opcodes, extra_inputs, updated_witness_counter) = - byte_decomposition(witness, num_bytes, num_witness); - new_opcodes.extend(extra_opcodes); - new_inputs.extend(extra_inputs); - num_witness = updated_witness_counter; - } - - let (result, num_witness, extra_opcodes) = - create_sha256_constraint(new_inputs, total_num_bytes, num_witness); - new_opcodes.extend(extra_opcodes); - - // constrain the outputs to be the same as the result of the circuit - for i in 0..outputs.len() { - let mut expr = Expression::from(outputs[i]); - expr.push_addition_term(-FieldElement::one(), result[i]); - new_opcodes.push(Opcode::Arithmetic(expr)); - } - (num_witness, new_opcodes) -} - -fn create_sha256_constraint( - mut input: Vec, - total_num_bytes: u32, - num_witness: u32, -) -> (Vec, u32, Vec) { - let mut new_opcodes = Vec::new(); - - // pad the bytes according to sha256 padding rules - let message_bits = total_num_bytes * 8; - let (mut num_witness, pad_witness, extra_opcodes) = pad(128, 8, num_witness); - new_opcodes.extend(extra_opcodes); - input.push(pad_witness); - let bytes_per_block = 64; - let num_bytes = (input.len() + 8) as u32; - let num_blocks = num_bytes / bytes_per_block + ((num_bytes % bytes_per_block != 0) as u32); - let num_total_bytes = num_blocks * bytes_per_block; - for _ in num_bytes..num_total_bytes { - let (updated_witness_counter, pad_witness, extra_opcodes) = pad(0, 8, num_witness); - num_witness = updated_witness_counter; - new_opcodes.extend(extra_opcodes); - input.push(pad_witness); - } - let (num_witness, pad_witness, extra_opcodes) = pad(message_bits, 64, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, pad_witness, num_witness) = - byte_decomposition(pad_witness.into(), 8, num_witness); - new_opcodes.extend(extra_opcodes); - 
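// Native sketch of the SHA-256 padding rule this section constrains: append 0x80,
// zero-fill until the length is 56 mod 64, then append the message length in bits as a
// 64-bit big-endian integer (the circuit reaches the same layout via pad() and
// byte_decomposition). Illustrative only.
fn sha256_pad_native(mut msg: Vec<u8>) -> Vec<u8> {
    let bit_len = (msg.len() as u64) * 8;
    msg.push(0x80);
    while msg.len() % 64 != 56 {
        msg.push(0x00);
    }
    msg.extend_from_slice(&bit_len.to_be_bytes());
    msg
}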
input.extend(pad_witness); - - // turn witness into u32 and load sha256 state - let (input, extra_opcodes, num_witness) = UInt32::from_witnesses(&input, num_witness); - new_opcodes.extend(extra_opcodes); - let (mut rolling_hash, extra_opcodes, num_witness) = prepare_state_constants(num_witness); - new_opcodes.extend(extra_opcodes); - let (round_constants, extra_opcodes, mut num_witness) = prepare_round_constants(num_witness); - new_opcodes.extend(extra_opcodes); - // split the input into blocks of size 16 - let input: Vec> = input.chunks(16).map(|block| block.to_vec()).collect(); - - // process sha256 blocks - for i in &input { - let (new_rolling_hash, extra_opcodes, updated_witness_counter) = - sha256_block(i, rolling_hash.clone(), round_constants.clone(), num_witness); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - rolling_hash = new_rolling_hash; - } - - // decompose the result bytes in u32 to u8 - let (extra_opcodes, byte1, num_witness) = - byte_decomposition(Expression::from(rolling_hash[0].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte2, num_witness) = - byte_decomposition(Expression::from(rolling_hash[1].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte3, num_witness) = - byte_decomposition(Expression::from(rolling_hash[2].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte4, num_witness) = - byte_decomposition(Expression::from(rolling_hash[3].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte5, num_witness) = - byte_decomposition(Expression::from(rolling_hash[4].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte6, num_witness) = - byte_decomposition(Expression::from(rolling_hash[5].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte7, num_witness) = - byte_decomposition(Expression::from(rolling_hash[6].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - let (extra_opcodes, byte8, num_witness) = - byte_decomposition(Expression::from(rolling_hash[7].inner), 4, num_witness); - new_opcodes.extend(extra_opcodes); - - let result = vec![byte1, byte2, byte3, byte4, byte5, byte6, byte7, byte8] - .into_iter() - .flatten() - .collect(); - - (result, num_witness, new_opcodes) -} - -pub(crate) fn pad(number: u32, bit_size: u32, mut num_witness: u32) -> (u32, Witness, Vec) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let pad = variables.new_variable(); - - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from(number as u128), - })], - outputs: vec![BrilligOutputs::Simple(pad)], - bytecode: vec![brillig::Opcode::Stop], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - - let range = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: pad, num_bits: bit_size }, - }); - new_opcodes.push(range); - - (num_witness, pad, new_opcodes) -} - -fn sha256_block( - input: &[UInt32], - rolling_hash: Vec, - round_constants: Vec, - mut num_witness: u32, -) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut w = Vec::new(); - w.extend(input.to_owned()); - - for i in 16..64 { - // calculate s0 `w[i - 15].ror(7) ^ w[i - 15].ror(18) ^ (w[i - 15] >> 3)` - let (a1, extra_opcodes, updated_witness_counter) = w[i - 
15].ror(7, num_witness); - new_opcodes.extend(extra_opcodes); - let (a2, extra_opcodes, updated_witness_counter) = - w[i - 15].ror(18, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (a3, extra_opcodes, updated_witness_counter) = - w[i - 15].rightshift(3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (a4, extra_opcodes, updated_witness_counter) = a1.xor(&a2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (s0, extra_opcodes, updated_witness_counter) = a4.xor(&a3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate s1 `w[i - 2].ror(17) ^ w[i - 2].ror(19) ^ (w[i - 2] >> 10)` - let (b1, extra_opcodes, updated_witness_counter) = - w[i - 2].ror(17, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (b2, extra_opcodes, updated_witness_counter) = - w[i - 2].ror(19, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (b3, extra_opcodes, updated_witness_counter) = - w[i - 2].rightshift(10, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (b4, extra_opcodes, updated_witness_counter) = b1.xor(&b2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (s1, extra_opcodes, updated_witness_counter) = b4.xor(&b3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate w[i] `w[i - 16] + w[i - 7] + s0 + s1` - let (c1, extra_opcodes, updated_witness_counter) = - w[i - 16].add(&w[i - 7], updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (c2, extra_opcodes, updated_witness_counter) = c1.add(&s0, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (c3, extra_opcodes, updated_witness_counter) = c2.add(&s1, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - w.push(c3); - num_witness = updated_witness_counter; - } - - let mut a = rolling_hash[0]; - let mut b = rolling_hash[1]; - let mut c = rolling_hash[2]; - let mut d = rolling_hash[3]; - let mut e = rolling_hash[4]; - let mut f = rolling_hash[5]; - let mut g = rolling_hash[6]; - let mut h = rolling_hash[7]; - - #[allow(non_snake_case)] - for i in 0..64 { - // calculate S1 `e.ror(6) ^ e.ror(11) ^ e.ror(25)` - let (a1, extra_opcodes, updated_witness_counter) = e.ror(6, num_witness); - new_opcodes.extend(extra_opcodes); - let (a2, extra_opcodes, updated_witness_counter) = e.ror(11, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (a3, extra_opcodes, updated_witness_counter) = e.ror(25, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (a4, extra_opcodes, updated_witness_counter) = a1.xor(&a2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (S1, extra_opcodes, updated_witness_counter) = a4.xor(&a3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate ch `(e & f) + (~e & g)` - let (b1, extra_opcodes, updated_witness_counter) = e.and(&f, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (b2, extra_opcodes, updated_witness_counter) = e.not(updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (b3, extra_opcodes, updated_witness_counter) = b2.and(&g, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (ch, extra_opcodes, updated_witness_counter) = b1.add(&b3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // caculate temp1 `h + S1 + ch + round_constants[i] + w[i]` - let (c1, extra_opcodes, updated_witness_counter) = h.add(&S1, updated_witness_counter); - 
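// For reference, the quantities constrained in this loop match the plain-u32 round
// functions below (a sketch on native integers, not the UInt32 witness type). The
// circuit computes ch as (e & f) + (!e & g) and, further down, maj as
// (a & (b + c - 2*(b & c))) + (b & c); both agree with the usual XOR forms because
// the summands never share set bits (and b + c - 2*(b & c) == b ^ c).
fn small_sigma0(x: u32) -> u32 { x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3) }
fn small_sigma1(x: u32) -> u32 { x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10) }
fn big_sigma1(e: u32) -> u32 { e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25) }
fn big_sigma0(a: u32) -> u32 { a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22) }
fn ch(e: u32, f: u32, g: u32) -> u32 { (e & f) ^ (!e & g) }
fn maj(a: u32, b: u32, c: u32) -> u32 { (a & b) ^ (a & c) ^ (b & c) }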
new_opcodes.extend(extra_opcodes); - let (c2, extra_opcodes, updated_witness_counter) = c1.add(&ch, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (c3, extra_opcodes, updated_witness_counter) = - c2.add(&round_constants[i], updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (temp1, extra_opcodes, updated_witness_counter) = - c3.add(&w[i], updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate S0 `a.ror(2) ^ a.ror(13) ^ a.ror(22)` - let (d1, extra_opcodes, updated_witness_counter) = a.ror(2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (d2, extra_opcodes, updated_witness_counter) = a.ror(13, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (d3, extra_opcodes, updated_witness_counter) = a.ror(22, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (d4, extra_opcodes, updated_witness_counter) = d1.xor(&d2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (S0, extra_opcodes, updated_witness_counter) = d4.xor(&d3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate T0 `b & c` - let (T0, extra_opcodes, updated_witness_counter) = b.and(&c, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate maj `(a & (b + c - (T0 + T0))) + T0` which is the same as `(a & b) ^ (a & c) ^ (b & c)` - let (e1, extra_opcodes, updated_witness_counter) = T0.add(&T0, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (e2, extra_opcodes, updated_witness_counter) = c.sub(&e1, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (e3, extra_opcodes, updated_witness_counter) = b.add(&e2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (e4, extra_opcodes, updated_witness_counter) = a.and(&e3, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - let (maj, extra_opcodes, updated_witness_counter) = e4.add(&T0, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - // calculate temp2 `S0 + maj` - let (temp2, extra_opcodes, updated_witness_counter) = S0.add(&maj, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - - h = g; - g = f; - f = e; - let (new_e, extra_opcodes, updated_witness_counter) = - d.add(&temp1, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - d = c; - c = b; - b = a; - let (new_a, extra_opcodes, updated_witness_counter) = - temp1.add(&temp2, updated_witness_counter); - new_opcodes.extend(extra_opcodes); - num_witness = updated_witness_counter; - a = new_a; - e = new_e; - } - - let mut output = Vec::new(); - let (output0, extra_opcodes, num_witness) = a.add(&rolling_hash[0], num_witness); - new_opcodes.extend(extra_opcodes); - let (output1, extra_opcodes, num_witness) = b.add(&rolling_hash[1], num_witness); - new_opcodes.extend(extra_opcodes); - let (output2, extra_opcodes, num_witness) = c.add(&rolling_hash[2], num_witness); - new_opcodes.extend(extra_opcodes); - let (output3, extra_opcodes, num_witness) = d.add(&rolling_hash[3], num_witness); - new_opcodes.extend(extra_opcodes); - let (output4, extra_opcodes, num_witness) = e.add(&rolling_hash[4], num_witness); - new_opcodes.extend(extra_opcodes); - let (output5, extra_opcodes, num_witness) = f.add(&rolling_hash[5], num_witness); - new_opcodes.extend(extra_opcodes); - let (output6, extra_opcodes, num_witness) = g.add(&rolling_hash[6], num_witness); - new_opcodes.extend(extra_opcodes); - let (output7, extra_opcodes, num_witness) = h.add(&rolling_hash[7], 
num_witness); - new_opcodes.extend(extra_opcodes); - - output.push(output0); - output.push(output1); - output.push(output2); - output.push(output3); - output.push(output4); - output.push(output5); - output.push(output6); - output.push(output7); - - (output, new_opcodes, num_witness) -} - -/// Load initial state constants of Sha256 -pub(crate) fn prepare_state_constants(mut num_witness: u32) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut new_witnesses = Vec::new(); - - for i in INIT_CONSTANTS { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(i, num_witness); - new_opcodes.extend(extra_opcodes); - new_witnesses.push(new_witness); - num_witness = updated_witness_counter; - } - - (new_witnesses, new_opcodes, num_witness) -} - -/// Load round constants of Sha256 -pub(crate) fn prepare_round_constants(mut num_witness: u32) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut new_witnesses = Vec::new(); - - for i in ROUND_CONSTANTS { - let (new_witness, extra_opcodes, updated_witness_counter) = - UInt32::load_constant(i, num_witness); - new_opcodes.extend(extra_opcodes); - new_witnesses.push(new_witness); - num_witness = updated_witness_counter; - } - - (new_witnesses, new_opcodes, num_witness) -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/uint.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/uint.rs deleted file mode 100644 index 6f4039835f7..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/uint.rs +++ /dev/null @@ -1,648 +0,0 @@ -#[macro_export] -macro_rules! impl_uint { - ( - $name:ident, - $type:ty, - $size:expr - ) => { - use acir::{ - brillig::{self, RegisterIndex}, - circuit::{ - brillig::{Brillig, BrilligInputs, BrilligOutputs}, - directives::QuotientDirective, - opcodes::{BlackBoxFuncCall, FunctionInput}, - Opcode, - }, - native_types::{Expression, Witness}, - FieldElement, - }; - use $crate::helpers::VariableStore; - - /// UInt contains a witness that points to a field element that represents a u32 integer - /// It has a inner field of type [Witness] that points to the field element and width = 32 - #[derive(Copy, Clone, Debug)] - pub struct $name { - pub(crate) inner: Witness, - width: u32, - } - - impl $name { - #[cfg(any(test, feature = "testing"))] - pub fn get_inner(&self) -> Witness { - self.inner - } - } - - impl $name { - /// Initialize A new [UInt] type with a [Witness] - pub fn new(witness: Witness) -> Self { - $name { inner: witness, width: $size } - } - - /// Get u(n) + 1 - pub(crate) fn get_max_plus_one( - &self, - mut num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from(2_u128.pow(self.width)), - })], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::Stop], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Load a constant into the circuit - pub(crate) fn load_constant( - constant: $type, - mut num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - let brillig_opcode = Opcode::Brillig(Brillig 
{ - inputs: vec![BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from(constant as u128), - })], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::Stop], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Returns the quotient and remainder such that lhs = rhs * quotient + remainder - // This should be the same as its equivalent in the Noir repo - pub fn euclidean_division( - lhs: &$name, - rhs: &$name, - mut num_witness: u32, - ) -> ($name, $name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let q_witness = variables.new_variable(); - let r_witness = variables.new_variable(); - - // compute quotient using directive function - let quotient_opcode = Opcode::Directive( - acir::circuit::directives::Directive::Quotient(QuotientDirective { - a: lhs.inner.into(), - b: rhs.inner.into(), - q: q_witness, - r: r_witness, - predicate: None, - }), - ); - new_opcodes.push(quotient_opcode); - - // make sure r and q are in 32 bit range - let r_range_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: r_witness, num_bits: lhs.width }, - }); - let q_range_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: q_witness, num_bits: lhs.width }, - }); - new_opcodes.push(r_range_opcode); - new_opcodes.push(q_range_opcode); - let num_witness = variables.finalize(); - - // constrain r < rhs - let (rhs_sub_r, extra_opcodes, num_witness) = - rhs.sub_no_overflow(&$name::new(r_witness), num_witness); - new_opcodes.extend(extra_opcodes); - let rhs_sub_r_range_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: rhs_sub_r.inner, num_bits: lhs.width }, - }); - new_opcodes.push(rhs_sub_r_range_opcode); - - // constrain lhs = rhs * quotient + remainder - let rhs_expr = Expression::from(rhs.inner); - let lhs_constraint = Expression::from(lhs.inner); - let rhs_constraint = &rhs_expr * &Expression::from(q_witness); - let rhs_constraint = &rhs_constraint.unwrap() + &Expression::from(r_witness); - let div_euclidean = &lhs_constraint - &rhs_constraint; - new_opcodes.push(Opcode::Arithmetic(div_euclidean)); - - ($name::new(q_witness), $name::new(r_witness), new_opcodes, num_witness) - } - - /// Rotate left `rotation` bits. `(x << rotation) | (x >> (width - rotation))` - // This should be the same as `u32.rotate_left(rotation)` in rust stdlib - pub fn rol(&self, rotation: u32, num_witness: u32) -> ($name, Vec, u32) { - let rotation = rotation % self.width; - let mut new_opcodes = Vec::new(); - let (right_shift, extra_opcodes, num_witness) = - self.rightshift(self.width - rotation, num_witness); - new_opcodes.extend(extra_opcodes); - let (left_shift, extra_opcodes, num_witness) = - self.leftshift(rotation, num_witness); - new_opcodes.extend(extra_opcodes); - let (result, extra_opcodes, num_witness) = left_shift.or(&right_shift, num_witness); - new_opcodes.extend(extra_opcodes); - - (result, new_opcodes, num_witness) - } - - /// Rotate right `rotation` bits. 
`(x >> rotation) | (x << (width - rotation))` - // This should be the same as `u32.rotate_right(rotation)` in rust stdlib - pub fn ror(&self, rotation: u32, num_witness: u32) -> ($name, Vec, u32) { - let rotation = rotation % self.width; - let mut new_opcodes = Vec::new(); - let (left_shift, extra_opcodes, num_witness) = - self.leftshift(self.width - rotation, num_witness); - new_opcodes.extend(extra_opcodes); - let (right_shift, extra_opcodes, num_witness) = - self.rightshift(rotation, num_witness); - new_opcodes.extend(extra_opcodes); - let (result, extra_opcodes, num_witness) = left_shift.or(&right_shift, num_witness); - new_opcodes.extend(extra_opcodes); - - (result, new_opcodes, num_witness) - } - - /// left shift by `bits` - pub fn leftshift(&self, bits: u32, num_witness: u32) -> ($name, Vec, u32) { - let bits = bits % self.width; - let mut new_opcodes = Vec::new(); - let two: $type = 2; - let (two_pow_rhs, extra_opcodes, num_witness) = - $name::load_constant(two.pow(bits), num_witness); - new_opcodes.extend(extra_opcodes); - let (left_shift, extra_opcodes, num_witness) = self.mul(&two_pow_rhs, num_witness); - new_opcodes.extend(extra_opcodes); - - (left_shift, new_opcodes, num_witness) - } - - /// right shift by `bits` - pub fn rightshift(&self, bits: u32, num_witness: u32) -> ($name, Vec, u32) { - let bits = bits % self.width; - let mut new_opcodes = Vec::new(); - let two: $type = 2; - let (two_pow_rhs, extra_opcodes, num_witness) = - $name::load_constant(two.pow(bits), num_witness); - new_opcodes.extend(extra_opcodes); - let (right_shift, _, extra_opcodes, num_witness) = - $name::euclidean_division(self, &two_pow_rhs, num_witness); - new_opcodes.extend(extra_opcodes); - - (right_shift, new_opcodes, num_witness) - } - - /// Caculate and constrain `self` + `rhs` - pub fn add(&self, rhs: &$name, mut num_witness: u32) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calculate `self` + `rhs` with overflow - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), rhs.inner)], - q_c: FieldElement::zero(), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Add, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain addition - let mut add_expr = Expression::from(new_witness); - add_expr.push_addition_term(-FieldElement::one(), self.inner); - add_expr.push_addition_term(-FieldElement::one(), rhs.inner); - new_opcodes.push(Opcode::Arithmetic(add_expr)); - - // mod 2^width to get final result as the remainder - let (two_pow_width, extra_opcodes, num_witness) = - self.get_max_plus_one(num_witness); - new_opcodes.extend(extra_opcodes); - let (_, add_mod, extra_opcodes, num_witness) = $name::euclidean_division( - &$name::new(new_witness), - &two_pow_width, - num_witness, - ); - new_opcodes.extend(extra_opcodes); - - (add_mod, new_opcodes, num_witness) - } - - /// Caculate and constrain `self` - `rhs` - pub fn sub(&self, rhs: &$name, 
mut num_witness: u32) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calculate 2^32 + self - rhs to avoid overflow - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), rhs.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from(1_u128 << self.width), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![ - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Add, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(2), - destination: RegisterIndex::from(0), - }, - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Sub, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }, - ], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain subtraction - let mut sub_constraint = Expression::from(self.inner); - sub_constraint.push_addition_term(-FieldElement::one(), new_witness); - sub_constraint.push_addition_term(-FieldElement::one(), rhs.inner); - sub_constraint.q_c = FieldElement::from(1_u128 << self.width); - new_opcodes.push(Opcode::Arithmetic(sub_constraint)); - - // mod 2^width to get final result as the remainder - let (two_pow_width, extra_opcodes, num_witness) = - self.get_max_plus_one(num_witness); - new_opcodes.extend(extra_opcodes); - let (_, sub_mod, extra_opcodes, num_witness) = $name::euclidean_division( - &$name::new(new_witness), - &two_pow_width, - num_witness, - ); - new_opcodes.extend(extra_opcodes); - - (sub_mod, new_opcodes, num_witness) - } - - /// Calculate and constrain `self` - `rhs` - 1 without allowing overflow - /// This is a helper function to `euclidean_division` - // There is a `-1` because theres a case where rhs = 2^32 and remainder = 0 - pub(crate) fn sub_no_overflow( - &self, - rhs: &$name, - mut num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calculate self - rhs - 1 - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), rhs.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::one(), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![ - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Sub, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }, - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Sub, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(2), - destination: RegisterIndex::from(0), - }, - ], - predicate: None, - 
}); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain subtraction - let mut sub_constraint = Expression::from(self.inner); - sub_constraint.push_addition_term(-FieldElement::one(), new_witness); - sub_constraint.push_addition_term(-FieldElement::one(), rhs.inner); - sub_constraint.q_c = -FieldElement::one(); - new_opcodes.push(Opcode::Arithmetic(sub_constraint)); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Calculate and constrain `self` * `rhs` - pub(crate) fn mul( - &self, - rhs: &$name, - mut num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - // calulate `self` * `rhs` with overflow - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), rhs.inner)], - q_c: FieldElement::zero(), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::BinaryFieldOp { - op: brillig::BinaryFieldOp::Mul, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain mul - let mut mul_constraint = Expression::from(new_witness); - mul_constraint.push_multiplication_term( - -FieldElement::one(), - self.inner, - rhs.inner, - ); - new_opcodes.push(Opcode::Arithmetic(mul_constraint)); - - // mod 2^width to get final result as the remainder - let (two_pow_rhs, extra_opcodes, num_witness) = self.get_max_plus_one(num_witness); - new_opcodes.extend(extra_opcodes); - let (_, mul_mod, extra_opcodes, num_witness) = - $name::euclidean_division(&$name::new(new_witness), &two_pow_rhs, num_witness); - new_opcodes.extend(extra_opcodes); - - (mul_mod, new_opcodes, num_witness) - } - - /// Calculate and constrain `self` and `rhs` - pub fn and(&self, rhs: &$name, mut num_witness: u32) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - let num_witness = variables.finalize(); - let and_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { - lhs: FunctionInput { witness: self.inner, num_bits: self.width }, - rhs: FunctionInput { witness: rhs.inner, num_bits: self.width }, - output: new_witness, - }); - new_opcodes.push(and_opcode); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Calculate and constrain `self` xor `rhs` - pub fn xor(&self, rhs: &$name, mut num_witness: u32) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - let num_witness = variables.finalize(); - let xor_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::XOR { - lhs: FunctionInput { witness: self.inner, num_bits: self.width }, - rhs: FunctionInput { witness: rhs.inner, num_bits: self.width }, - output: new_witness, - }); - new_opcodes.push(xor_opcode); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Calculate and constrain `self` or `rhs` - pub fn or(&self, rhs: &$name, num_witness: u32) -> ($name, Vec, u32) { - let 
mut new_opcodes = Vec::new(); - - // a | b = (a & b) + (a ^ b) - let (a_and_b, extra_opcodes, num_witness) = self.and(rhs, num_witness); - new_opcodes.extend(extra_opcodes); - let (a_xor_b, extra_opcodes, num_witness) = self.xor(rhs, num_witness); - new_opcodes.extend(extra_opcodes); - let (or, extra_opcodes, num_witness) = a_and_b.add(&a_xor_b, num_witness); - new_opcodes.extend(extra_opcodes); - - (or, new_opcodes, num_witness) - } - - /// Calculate and constrain not `self` - pub(crate) fn not(&self, mut num_witness: u32) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from((1_u128 << self.width) - 1), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Sub, - bit_size: self.width, - lhs: RegisterIndex::from(1), - rhs: RegisterIndex::from(0), - destination: RegisterIndex::from(0), - }], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - let mut not_constraint = Expression::from(new_witness); - not_constraint.push_addition_term(FieldElement::one(), self.inner); - not_constraint.q_c = -FieldElement::from((1_u128 << self.width) - 1); - new_opcodes.push(Opcode::Arithmetic(not_constraint)); - - ($name::new(new_witness), new_opcodes, num_witness) - } - - /// Calculate and constrain `self` >= `rhs` - // This should be similar to its equivalent in the Noir repo - pub(crate) fn more_than_eq_comparison( - &self, - rhs: &$name, - mut num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let new_witness = variables.new_variable(); - let q_witness = variables.new_variable(); - let r_witness = variables.new_variable(); - - // calculate 2^32 + self - rhs - let brillig_opcode = Opcode::Brillig(Brillig { - inputs: vec![ - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), self.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![(FieldElement::one(), rhs.inner)], - q_c: FieldElement::zero(), - }), - BrilligInputs::Single(Expression { - mul_terms: vec![], - linear_combinations: vec![], - q_c: FieldElement::from(1_u128 << self.width), - }), - ], - outputs: vec![BrilligOutputs::Simple(new_witness)], - bytecode: vec![ - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Add, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(2), - destination: RegisterIndex::from(0), - }, - brillig::Opcode::BinaryIntOp { - op: brillig::BinaryIntOp::Sub, - bit_size: 127, - lhs: RegisterIndex::from(0), - rhs: RegisterIndex::from(1), - destination: RegisterIndex::from(0), - }, - ], - predicate: None, - }); - new_opcodes.push(brillig_opcode); - let num_witness = variables.finalize(); - - // constrain subtraction - let mut sub_constraint = Expression::from(self.inner); - sub_constraint.push_addition_term(-FieldElement::one(), new_witness); - sub_constraint.push_addition_term(-FieldElement::one(), 
rhs.inner); - sub_constraint.q_c = FieldElement::from(1_u128 << self.width); - new_opcodes.push(Opcode::Arithmetic(sub_constraint)); - - let (two_pow_rhs, extra_opcodes, num_witness) = self.get_max_plus_one(num_witness); - new_opcodes.extend(extra_opcodes); - - // constraint 2^{max_bits} + a - b = q * 2^{max_bits} + r - // q = 1 if a == b - // q = 1 if a > b - // q = 0 if a < b - let quotient_opcode = Opcode::Directive( - acir::circuit::directives::Directive::Quotient(QuotientDirective { - a: new_witness.into(), - b: two_pow_rhs.inner.into(), - q: q_witness, - r: r_witness, - predicate: None, - }), - ); - new_opcodes.push(quotient_opcode); - - // make sure r in 32 bit range and q is 1 bit - let r_range_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: r_witness, num_bits: self.width }, - }); - let q_range_opcode = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: q_witness, num_bits: 1 }, - }); - new_opcodes.push(r_range_opcode); - new_opcodes.push(q_range_opcode); - - ($name::new(q_witness), new_opcodes, num_witness) - } - - /// Calculate and constrain `self` < `rhs` - pub fn less_than_comparison( - &self, - rhs: &$name, - num_witness: u32, - ) -> ($name, Vec, u32) { - let mut new_opcodes = Vec::new(); - let (mut comparison, extra_opcodes, num_witness) = - self.more_than_eq_comparison(rhs, num_witness); - new_opcodes.extend(extra_opcodes); - comparison.width = 1; - - // `self` < `rhs` == not `self` >= `rhs` - let (less_than, extra_opcodes, num_witness) = comparison.not(num_witness); - new_opcodes.extend(extra_opcodes); - - (less_than, new_opcodes, num_witness) - } - } - }; -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/uint32.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/uint32.rs deleted file mode 100644 index 58314d6ba4c..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/uint32.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::impl_uint; - -impl_uint!(UInt32, u32, 32); -impl UInt32 { - /// Load a [UInt32] from four [Witness]es each representing a [u8] - pub(crate) fn from_witnesses( - witnesses: &[Witness], - mut num_witness: u32, - ) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let mut uint = Vec::new(); - - for i in 0..witnesses.len() / 4 { - let new_witness = variables.new_variable(); - uint.push(UInt32::new(new_witness)); - let mut expr = Expression::from(new_witness); - for j in 0..4 { - let scaling_factor_value = 1 << (8 * (3 - j) as u32); - let scaling_factor = FieldElement::from(scaling_factor_value as u128); - expr.push_addition_term(-scaling_factor, witnesses[i * 4 + j]); - } - - new_opcodes.push(Opcode::Arithmetic(expr)); - } - let num_witness = variables.finalize(); - - (uint, new_opcodes, num_witness) - } -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/uint64.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/uint64.rs deleted file mode 100644 index cddb23275cb..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/uint64.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::impl_uint; - -impl_uint!(UInt64, u64, 64); -impl UInt64 { - /// Load a [UInt64] from eight [Witness]es each representing a [u8] - pub(crate) fn from_witnesses( - witnesses: &[Witness], - mut num_witness: u32, - ) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - let mut uint = Vec::new(); - - for i in 0..witnesses.len() / 8 { - let new_witness = variables.new_variable(); - 
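// Illustrative note in plain Rust (native integers rather than witnesses): the expression
// constrained below recombines eight big-endian bytes into one value,
//     value == u64::from_be_bytes([b0, b1, ..., b7]) == sum_j(byte_j * 2^(8 * (7 - j))),
// which is what the -scaling_factor addition terms encode against new_witness.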
uint.push(UInt64::new(new_witness)); - let mut expr = Expression::from(new_witness); - for j in 0..8 { - let scaling_factor_value: u128 = 1 << (8 * (7 - j) as u32); - let scaling_factor = FieldElement::from(scaling_factor_value); - expr.push_addition_term(-scaling_factor, witnesses[i * 8 + j]); - } - - new_opcodes.push(Opcode::Arithmetic(expr)); - } - let num_witness = variables.finalize(); - - (uint, new_opcodes, num_witness) - } -} diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/uint8.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/uint8.rs deleted file mode 100644 index 2ffc2cae1be..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/uint8.rs +++ /dev/null @@ -1,2 +0,0 @@ -use crate::impl_uint; -impl_uint!(UInt8, u8, 8); diff --git a/acvm-repo/stdlib/src/blackbox_fallbacks/utils.rs b/acvm-repo/stdlib/src/blackbox_fallbacks/utils.rs deleted file mode 100644 index 4921c71c9fe..00000000000 --- a/acvm-repo/stdlib/src/blackbox_fallbacks/utils.rs +++ /dev/null @@ -1,175 +0,0 @@ -use crate::helpers::VariableStore; -use acir::{ - circuit::{ - directives::Directive, - opcodes::{BlackBoxFuncCall, FunctionInput}, - Opcode, - }, - native_types::{Expression, Witness}, - FieldElement, -}; - -fn round_to_nearest_mul_8(num_bits: u32) -> u32 { - let remainder = num_bits % 8; - - if remainder == 0 { - return num_bits; - } - - num_bits + 8 - remainder -} - -pub(crate) fn round_to_nearest_byte(num_bits: u32) -> u32 { - round_to_nearest_mul_8(num_bits) / 8 -} - -pub(crate) fn boolean_expr(expr: &Expression, variables: &mut VariableStore) -> Expression { - &mul_with_witness(expr, expr, variables) - expr -} - -/// Returns an expression which represents `lhs * rhs` -/// -/// If one has multiplicative term and the other is of degree one or more, -/// the function creates [intermediate variables][`Witness`] accordingly. -/// There are two cases where we can optimize the multiplication between two expressions: -/// 1. If both expressions have at most a total degree of 1 in each term, then we can just multiply them -/// as each term in the result will be degree-2. -/// 2. If one expression is a constant, then we can just multiply the constant with the other expression -/// -/// (1) is because an [`Expression`] can hold at most a degree-2 univariate polynomial -/// which is what you get when you multiply two degree-1 univariate polynomials. -pub(crate) fn mul_with_witness( - lhs: &Expression, - rhs: &Expression, - variables: &mut VariableStore, -) -> Expression { - use std::borrow::Cow; - let lhs_is_linear = lhs.is_linear(); - let rhs_is_linear = rhs.is_linear(); - - // Case 1: Both expressions have at most a total degree of 1 in each term - if lhs_is_linear && rhs_is_linear { - return (lhs * rhs) - .expect("one of the expressions is a constant and so this should not fail"); - } - - // Case 2: One or both of the sides needs to be reduced to a degree-1 univariate polynomial - let lhs_reduced = if lhs_is_linear { - Cow::Borrowed(lhs) - } else { - Cow::Owned(variables.new_variable().into()) - }; - - // If the lhs and rhs are the same, then we do not need to reduce - // rhs, we only need to square the lhs. 
- if lhs == rhs { - return (&*lhs_reduced * &*lhs_reduced) - .expect("Both expressions are reduced to be degree<=1"); - }; - - let rhs_reduced = if rhs_is_linear { - Cow::Borrowed(rhs) - } else { - Cow::Owned(variables.new_variable().into()) - }; - - (&*lhs_reduced * &*rhs_reduced).expect("Both expressions are reduced to be degree<=1") -} - -// Generates opcodes and directives to bit decompose the input `opcode` -// Returns the bits and the updated witness counter -// TODO:Ideally, we return the updated witness counter, or we require the input -// TODO to be a VariableStore. We are not doing this because we want migration to -// TODO be less painful -pub(crate) fn bit_decomposition( - opcode: Expression, - bit_size: u32, - mut num_witness: u32, -) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - - // First create a witness for each bit - let mut bit_vector = Vec::with_capacity(bit_size as usize); - for _ in 0..bit_size { - bit_vector.push(variables.new_variable()); - } - - // Next create a directive which computes those bits. - new_opcodes.push(Opcode::Directive(Directive::ToLeRadix { - a: opcode.clone(), - b: bit_vector.clone(), - radix: 2, - })); - - // Now apply constraints to the bits such that they are the bit decomposition - // of the input and each bit is actually a bit - let mut binary_exprs = Vec::new(); - let mut bit_decomp_constraint = opcode; - let mut two_pow: FieldElement = FieldElement::one(); - let two = FieldElement::from(2_i128); - for &bit in &bit_vector { - // Bit constraint to ensure each bit is a zero or one; bit^2 - bit = 0 - let expr = boolean_expr(&bit.into(), &mut variables); - binary_exprs.push(Opcode::Arithmetic(expr)); - - // Constraint to ensure that the bits are constrained to be a bit decomposition - // of the input - // ie \sum 2^i * x_i = input - bit_decomp_constraint.push_addition_term(-two_pow, bit); - two_pow = two * two_pow; - } - - new_opcodes.extend(binary_exprs); - bit_decomp_constraint.sort(); // TODO: we have an issue open to check if this is needed. Ideally, we remove it. - new_opcodes.push(Opcode::Arithmetic(bit_decomp_constraint)); - - (new_opcodes, bit_vector, variables.finalize()) -} - -// TODO: Maybe this can be merged with `bit_decomposition` -pub(crate) fn byte_decomposition( - opcode: Expression, - num_bytes: u32, - mut num_witness: u32, -) -> (Vec, Vec, u32) { - let mut new_opcodes = Vec::new(); - let mut variables = VariableStore::new(&mut num_witness); - - // First create a witness for each byte - let mut vector = Vec::with_capacity(num_bytes as usize); - for _ in 0..num_bytes { - vector.push(variables.new_variable()); - } - - // Next create a directive which computes those byte. 
- new_opcodes.push(Opcode::Directive(Directive::ToLeRadix { - a: opcode.clone(), - b: vector.clone(), - radix: 256, - })); - vector.reverse(); - - // Now apply constraints to the bytes such that they are the byte decomposition - // of the input and each byte is actually a byte - let mut byte_exprs = Vec::new(); - let mut decomp_constraint = opcode; - let byte_shift: u128 = 256; - for (i, v) in vector.iter().enumerate() { - let range = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { - input: FunctionInput { witness: *v, num_bits: 8 }, - }); - let scaling_factor_value = byte_shift.pow(num_bytes - 1 - i as u32); - let scaling_factor = FieldElement::from(scaling_factor_value); - - decomp_constraint.push_addition_term(-scaling_factor, *v); - - byte_exprs.push(range); - } - - new_opcodes.extend(byte_exprs); - decomp_constraint.sort(); - new_opcodes.push(Opcode::Arithmetic(decomp_constraint)); - - (new_opcodes, vector, variables.finalize()) -} diff --git a/acvm-repo/stdlib/src/helpers.rs b/acvm-repo/stdlib/src/helpers.rs deleted file mode 100644 index 5ab258368f4..00000000000 --- a/acvm-repo/stdlib/src/helpers.rs +++ /dev/null @@ -1,23 +0,0 @@ -use acir::native_types::Witness; - -// Simple helper struct to keep track of the current witness index -// and create variables -pub struct VariableStore<'a> { - witness_index: &'a mut u32, -} - -impl<'a> VariableStore<'a> { - pub fn new(witness_index: &'a mut u32) -> Self { - Self { witness_index } - } - - pub fn new_variable(&mut self) -> Witness { - let witness = Witness(*self.witness_index); - *self.witness_index += 1; - witness - } - - pub fn finalize(self) -> u32 { - *self.witness_index - } -} diff --git a/acvm-repo/stdlib/src/lib.rs b/acvm-repo/stdlib/src/lib.rs deleted file mode 100644 index 9aecde631fb..00000000000 --- a/acvm-repo/stdlib/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![forbid(unsafe_code)] -#![warn(unreachable_pub)] -#![warn(clippy::semicolon_if_nothing_returned)] -#![cfg_attr(not(test), warn(unused_crate_dependencies, unused_extern_crates))] - -pub mod blackbox_fallbacks; -pub mod helpers; diff --git a/compiler/fm/Cargo.toml b/compiler/fm/Cargo.toml index 699f709e9b5..42e4b0c25d7 100644 --- a/compiler/fm/Cargo.toml +++ b/compiler/fm/Cargo.toml @@ -12,5 +12,5 @@ codespan-reporting.workspace = true serde.workspace = true [dev-dependencies] -tempfile = "3.2.0" +tempfile.workspace = true iter-extended.workspace = true diff --git a/compiler/fm/build.rs b/compiler/fm/build.rs deleted file mode 100644 index 747ab4fe1a2..00000000000 --- a/compiler/fm/build.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::path::Path; - -/// Expects that the given directory is an existing path -fn rerun_if_stdlib_changes(directory: &Path) { - for entry in std::fs::read_dir(directory).unwrap() { - let path = entry.unwrap().path(); - - if path.is_dir() { - rerun_if_stdlib_changes(&path); - } else { - // Tell Cargo that if the given file changes, to rerun this build script. 
- println!("cargo:rerun-if-changed={}", path.to_string_lossy()); - } - } -} - -fn main() { - let stdlib_src_dir = Path::new("../../noir_stdlib/"); - rerun_if_stdlib_changes(stdlib_src_dir); -} diff --git a/compiler/fm/src/lib.rs b/compiler/fm/src/lib.rs index 2a54e58d3b9..4870a6c283b 100644 --- a/compiler/fm/src/lib.rs +++ b/compiler/fm/src/lib.rs @@ -88,9 +88,9 @@ impl FileManager { assert!(old_value.is_none(), "ice: the same path was inserted into the file manager twice"); } - pub fn fetch_file(&self, file_id: FileId) -> File { + pub fn fetch_file(&self, file_id: FileId) -> &str { // Unwrap as we ensure that all file_id's map to a corresponding file in the file map - self.file_map.get_file(file_id).unwrap() + self.file_map.get_file(file_id).unwrap().source() } pub fn path(&self, file_id: FileId) -> &Path { @@ -99,54 +99,12 @@ impl FileManager { self.id_to_path.get(&file_id).unwrap().as_path() } - // TODO: This should also ideally not be here, so that the file manager - // TODO: does not know about rust modules. - // TODO: Ideally this is moved to def_collector_mod and we make this method accept a FileManager - pub fn find_module(&self, anchor: FileId, mod_name: &str) -> Result { - let anchor_path = self.path(anchor).with_extension(""); - let anchor_dir = anchor_path.parent().unwrap(); - - // if `anchor` is a `main.nr`, `lib.nr`, `mod.nr` or `{mod_name}.nr`, we check siblings of - // the anchor at `base/mod_name.nr`. - let candidate = if should_check_siblings_for_module(&anchor_path, anchor_dir) { - anchor_dir.join(format!("{mod_name}.{FILE_EXTENSION}")) - } else { - // Otherwise, we check for children of the anchor at `base/anchor/mod_name.nr` - anchor_path.join(format!("{mod_name}.{FILE_EXTENSION}")) - }; - - self.name_to_id(candidate.clone()) - .ok_or_else(|| candidate.as_os_str().to_string_lossy().to_string()) - } - // TODO: This should accept a &Path instead of a PathBuf pub fn name_to_id(&self, file_name: PathBuf) -> Option { self.file_map.get_file_id(&PathString::from_path(file_name)) } } -// TODO: This should not be here because the file manager should not know about the -// TODO: rust modules. See comment on `find_module`` -// TODO: Moreover, the check for main, lib, mod should ideally not be done here -/// Returns true if a module's child module's are expected to be in the same directory. -/// Returns false if they are expected to be in a subdirectory matching the name of the module. -fn should_check_siblings_for_module(module_path: &Path, parent_path: &Path) -> bool { - if let Some(filename) = module_path.file_stem() { - // This check also means a `main.nr` or `lib.nr` file outside of the crate root would - // check its same directory for child modules instead of a subdirectory. Should we prohibit - // `main.nr` and `lib.nr` files outside of the crate root? - filename == "main" - || filename == "lib" - || filename == "mod" - || Some(filename) == parent_path.file_stem() - } else { - // If there's no filename, we arbitrarily return true. - // Alternatively, we could panic, but this is left to a different step where we - // ideally have some source location to issue an error. - true - } -} - pub trait NormalizePath { /// Replacement for `std::fs::canonicalize` that doesn't verify the path exists. 
/// @@ -236,22 +194,6 @@ mod tests { file_path } - #[test] - fn path_resolve_file_module() { - let dir = tempdir().unwrap(); - - let entry_file_name = Path::new("my_dummy_file.nr"); - create_dummy_file(&dir, entry_file_name); - - let mut fm = FileManager::new(dir.path()); - - let file_id = fm.add_file_with_source(entry_file_name, "fn foo() {}".to_string()).unwrap(); - - let dep_file_name = Path::new("foo.nr"); - create_dummy_file(&dir, dep_file_name); - fm.find_module(file_id, "foo").unwrap_err(); - } - #[test] fn path_resolve_file_module_other_ext() { let dir = tempdir().unwrap(); @@ -265,47 +207,6 @@ mod tests { assert!(fm.path(file_id).ends_with("foo.nr")); } - #[test] - fn path_resolve_sub_module() { - let dir = tempdir().unwrap(); - let mut fm = FileManager::new(dir.path()); - - // Create a lib.nr file at the root. - // we now have dir/lib.nr - let lib_nr_path = create_dummy_file(&dir, Path::new("lib.nr")); - let file_id = fm - .add_file_with_source(lib_nr_path.as_path(), "fn foo() {}".to_string()) - .expect("could not add file to file manager and obtain a FileId"); - - // Create a sub directory - // we now have: - // - dir/lib.nr - // - dir/sub_dir - let sub_dir = TempDir::new_in(&dir).unwrap(); - let sub_dir_name = sub_dir.path().file_name().unwrap().to_str().unwrap(); - - // Add foo.nr to the subdirectory - // we no have: - // - dir/lib.nr - // - dir/sub_dir/foo.nr - let foo_nr_path = create_dummy_file(&sub_dir, Path::new("foo.nr")); - fm.add_file_with_source(foo_nr_path.as_path(), "fn foo() {}".to_string()); - - // Add a parent module for the sub_dir - // we no have: - // - dir/lib.nr - // - dir/sub_dir.nr - // - dir/sub_dir/foo.nr - let sub_dir_nr_path = create_dummy_file(&dir, Path::new(&format!("{sub_dir_name}.nr"))); - fm.add_file_with_source(sub_dir_nr_path.as_path(), "fn foo() {}".to_string()); - - // First check for the sub_dir.nr file and add it to the FileManager - let sub_dir_file_id = fm.find_module(file_id, sub_dir_name).unwrap(); - - // Now check for files in it's subdirectory - fm.find_module(sub_dir_file_id, "foo").unwrap(); - } - /// Tests that two identical files that have different paths are treated as the same file /// e.g. 
if we start in the dir ./src and have a file ../../foo.nr /// that should be treated as the same file as ../ starting in ./ diff --git a/compiler/noirc_driver/Cargo.toml b/compiler/noirc_driver/Cargo.toml index e5a837e6822..7f431db4398 100644 --- a/compiler/noirc_driver/Cargo.toml +++ b/compiler/noirc_driver/Cargo.toml @@ -22,5 +22,6 @@ fm.workspace = true serde.workspace = true fxhash.workspace = true rust-embed = "6.6.0" +log.workspace = true aztec_macros = { path = "../../aztec_macros" } diff --git a/compiler/noirc_driver/build.rs b/compiler/noirc_driver/build.rs index 6bef7f1fda7..73a56142075 100644 --- a/compiler/noirc_driver/build.rs +++ b/compiler/noirc_driver/build.rs @@ -1,4 +1,5 @@ const GIT_COMMIT: &&str = &"GIT_COMMIT"; +use std::path::Path; fn main() { // Only use build_data if the environment variable isn't set @@ -8,4 +9,21 @@ fn main() { build_data::set_GIT_DIRTY(); build_data::no_debug_rebuilds(); } + + let stdlib_src_dir = Path::new("../../noir_stdlib/"); + rerun_if_stdlib_changes(stdlib_src_dir); +} + +/// Expects that the given directory is an existing path +fn rerun_if_stdlib_changes(directory: &Path) { + for entry in std::fs::read_dir(directory).unwrap() { + let path = entry.unwrap().path(); + + if path.is_dir() { + rerun_if_stdlib_changes(&path); + } else { + // Tell Cargo that if the given file changes, to rerun this build script. + println!("cargo:rerun-if-changed={}", path.to_string_lossy()); + } + } } diff --git a/compiler/noirc_driver/src/debug.rs b/compiler/noirc_driver/src/debug.rs index 144e636b534..84a3e143357 100644 --- a/compiler/noirc_driver/src/debug.rs +++ b/compiler/noirc_driver/src/debug.rs @@ -31,7 +31,7 @@ pub(crate) fn filter_relevant_files( let mut file_map = BTreeMap::new(); for file_id in files_with_debug_symbols { - let file_source = file_manager.fetch_file(file_id).source(); + let file_source = file_manager.fetch_file(file_id); file_map.insert( file_id, diff --git a/compiler/noirc_driver/src/lib.rs b/compiler/noirc_driver/src/lib.rs index c326d04c84d..24b159568f2 100644 --- a/compiler/noirc_driver/src/lib.rs +++ b/compiler/noirc_driver/src/lib.rs @@ -147,6 +147,8 @@ pub fn check_crate( deny_warnings: bool, disable_macros: bool, ) -> CompilationResult<()> { + log::trace!("Start checking crate"); + let macros: Vec<&dyn MacroProcessor> = if disable_macros { vec![] } else { @@ -160,6 +162,8 @@ pub fn check_crate( diagnostic.in_file(file_id) })); + log::trace!("Finish checking crate"); + if has_errors(&errors, deny_warnings) { Err(errors) } else { @@ -372,6 +376,7 @@ pub fn compile_no_check( force_compile || options.print_acir || options.show_brillig || options.show_ssa; if !force_compile && hashes_match { + log::info!("Program matches existing artifact, returning early"); return Ok(cached_program.expect("cache must exist for hashes to match")); } let visibility = program.return_visibility; diff --git a/compiler/noirc_errors/Cargo.toml b/compiler/noirc_errors/Cargo.toml index 0cb6afc73bd..812a507550c 100644 --- a/compiler/noirc_errors/Cargo.toml +++ b/compiler/noirc_errors/Cargo.toml @@ -15,3 +15,4 @@ fm.workspace = true chumsky.workspace = true serde.workspace = true serde_with = "3.2.0" +log.workspace = true diff --git a/compiler/noirc_errors/src/debug_info.rs b/compiler/noirc_errors/src/debug_info.rs index 888c24adc1a..3ae5c193e39 100644 --- a/compiler/noirc_errors/src/debug_info.rs +++ b/compiler/noirc_errors/src/debug_info.rs @@ -39,6 +39,7 @@ impl DebugInfo { /// renders the old `OpcodeLocation`s invalid. 
The AcirTransformationMap is able to map the old `OpcodeLocation` to the new ones. /// Note: One old `OpcodeLocation` might have transformed into more than one new `OpcodeLocation`. pub fn update_acir(&mut self, update_map: AcirTransformationMap) { + log::trace!("Start debug info update"); let old_locations = mem::take(&mut self.locations); for (old_opcode_location, source_locations) in old_locations { @@ -46,6 +47,7 @@ impl DebugInfo { self.locations.insert(new_opcode_location, source_locations.clone()); }); } + log::trace!("Finish debug info update"); } pub fn opcode_location(&self, loc: &OpcodeLocation) -> Option> { diff --git a/compiler/noirc_evaluator/Cargo.toml b/compiler/noirc_evaluator/Cargo.toml index 933ec2b300c..6e7152c6d71 100644 --- a/compiler/noirc_evaluator/Cargo.toml +++ b/compiler/noirc_evaluator/Cargo.toml @@ -17,3 +17,4 @@ thiserror.workspace = true num-bigint = "0.4" im = { version = "15.1", features = ["serde"] } serde.workspace = true +log.workspace = true diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index 8e1c62edc69..6a02a5f6edc 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -42,6 +42,7 @@ pub(crate) fn optimize_into_acir( ) -> Result { let abi_distinctness = program.return_distinctness; + log::trace!("Start SSA generation"); let ssa_builder = SsaBuilder::new(program, print_ssa_passes)? .run_pass(Ssa::defunctionalize, "After Defunctionalization:") .run_pass(Ssa::inline_functions, "After Inlining:") @@ -69,9 +70,15 @@ pub(crate) fn optimize_into_acir( let ssa = ssa_builder .run_pass(Ssa::fill_internal_slices, "After Fill Internal Slice Dummy Data:") .finish(); + log::trace!("Finish SSA generation"); let last_array_uses = ssa.find_last_array_uses(); - ssa.into_acir(brillig, abi_distinctness, &last_array_uses) + + log::trace!("Start ACIR generation"); + let acir = ssa.into_acir(brillig, abi_distinctness, &last_array_uses); + log::trace!("Finish ACIR generation"); + + acir } /// Compiles the [`Program`] into [`ACIR`][acvm::acir::circuit::Circuit]. 
@@ -83,6 +90,8 @@ pub fn create_circuit( enable_ssa_logging: bool, enable_brillig_logging: bool, ) -> Result<(Circuit, DebugInfo, Vec, Vec, Vec), RuntimeError> { + log::trace!("Start circuit generation"); + let func_sig = program.main_function_signature.clone(); let mut generated_acir = optimize_into_acir(program, enable_ssa_logging, enable_brillig_logging)?; @@ -124,6 +133,8 @@ pub fn create_circuit( let (optimized_circuit, transformation_map) = acvm::compiler::optimize(circuit); debug_info.update_acir(transformation_map); + log::trace!("Finish circuit generation"); + Ok((optimized_circuit, debug_info, input_witnesses, return_witnesses, warnings)) } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index 712913841f3..1f85145260d 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -1605,28 +1605,28 @@ fn execute_brillig( _signature: &[u8], _message: &[u8], ) -> Result { - Err(BlackBoxResolutionError::Unsupported(BlackBoxFunc::SchnorrVerify)) + unimplemented!("SchnorrVerify is not supported") } fn pedersen_commitment( &self, _inputs: &[FieldElement], _domain_separator: u32, ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { - Err(BlackBoxResolutionError::Unsupported(BlackBoxFunc::PedersenCommitment)) + unimplemented!("PedersenCommitment is not supported") } fn pedersen_hash( &self, _inputs: &[FieldElement], _domain_separator: u32, ) -> Result { - Err(BlackBoxResolutionError::Unsupported(BlackBoxFunc::PedersenHash)) + unimplemented!("PedersenHash is not supported") } fn fixed_base_scalar_mul( &self, _low: &FieldElement, _high: &FieldElement, ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { - Err(BlackBoxResolutionError::Unsupported(BlackBoxFunc::FixedBaseScalarMul)) + unimplemented!("FixedBaseScalarMul is not supported") } } diff --git a/compiler/noirc_frontend/Cargo.toml b/compiler/noirc_frontend/Cargo.toml index 6f3c35a814a..aa3a8e9f6b8 100644 --- a/compiler/noirc_frontend/Cargo.toml +++ b/compiler/noirc_frontend/Cargo.toml @@ -22,7 +22,9 @@ serde.workspace = true rustc-hash = "1.1.0" small-ord-set = "0.1.3" regex = "1.9.1" +log.workspace = true [dev-dependencies] strum = "0.24" strum_macros = "0.24" +tempfile.workspace = true diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs index aa1c658bade..04791b11b2a 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs @@ -1,7 +1,7 @@ -use std::{collections::HashMap, vec}; +use std::{collections::HashMap, path::Path, vec}; use acvm::acir::acir_field::FieldOptions; -use fm::FileId; +use fm::{FileId, FileManager, FILE_EXTENSION}; use noirc_errors::Location; use crate::{ @@ -524,7 +524,7 @@ impl<'a> ModCollector<'a> { ) -> Vec<(CompilationError, FileId)> { let mut errors: Vec<(CompilationError, FileId)> = vec![]; let child_file_id = - match context.file_manager.find_module(self.file_id, &mod_name.0.contents) { + match find_module(&context.file_manager, self.file_id, &mod_name.0.contents) { Ok(child_file_id) => child_file_id, Err(expected_path) => { let mod_name = mod_name.clone(); @@ -628,3 +628,116 @@ impl<'a> ModCollector<'a> { Ok(LocalModuleId(module_id)) } } + +fn find_module( + file_manager: &FileManager, + anchor: FileId, + mod_name: &str, +) -> Result { + 
let anchor_path = file_manager.path(anchor).with_extension(""); + let anchor_dir = anchor_path.parent().unwrap(); + + // if `anchor` is a `main.nr`, `lib.nr`, `mod.nr` or `{mod_name}.nr`, we check siblings of + // the anchor at `base/mod_name.nr`. + let candidate = if should_check_siblings_for_module(&anchor_path, anchor_dir) { + anchor_dir.join(format!("{mod_name}.{FILE_EXTENSION}")) + } else { + // Otherwise, we check for children of the anchor at `base/anchor/mod_name.nr` + anchor_path.join(format!("{mod_name}.{FILE_EXTENSION}")) + }; + + file_manager + .name_to_id(candidate.clone()) + .ok_or_else(|| candidate.as_os_str().to_string_lossy().to_string()) +} + +/// Returns true if a module's child modules are expected to be in the same directory. +/// Returns false if they are expected to be in a subdirectory matching the name of the module. +fn should_check_siblings_for_module(module_path: &Path, parent_path: &Path) -> bool { + if let Some(filename) = module_path.file_stem() { + // This check also means a `main.nr` or `lib.nr` file outside of the crate root would + // check its same directory for child modules instead of a subdirectory. Should we prohibit + // `main.nr` and `lib.nr` files outside of the crate root? + filename == "main" + || filename == "lib" + || filename == "mod" + || Some(filename) == parent_path.file_stem() + } else { + // If there's no filename, we arbitrarily return true. + // Alternatively, we could panic, but this is left to a different step where we + // ideally have some source location to issue an error. + true + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::path::PathBuf; + use tempfile::{tempdir, TempDir}; + + // Returns the absolute path to the file + fn create_dummy_file(dir: &TempDir, file_name: &Path) -> PathBuf { + let file_path = dir.path().join(file_name); + let _file = std::fs::File::create(&file_path).unwrap(); + file_path + } + + #[test] + fn path_resolve_file_module() { + let dir = tempdir().unwrap(); + + let entry_file_name = Path::new("my_dummy_file.nr"); + create_dummy_file(&dir, entry_file_name); + + let mut fm = FileManager::new(dir.path()); + + let file_id = fm.add_file_with_source(entry_file_name, "fn foo() {}".to_string()).unwrap(); + + let dep_file_name = Path::new("foo.nr"); + create_dummy_file(&dir, dep_file_name); + find_module(&fm, file_id, "foo").unwrap_err(); + } + + #[test] + fn path_resolve_sub_module() { + let dir = tempdir().unwrap(); + let mut fm = FileManager::new(dir.path()); + + // Create a lib.nr file at the root. 
+ // we now have dir/lib.nr + let lib_nr_path = create_dummy_file(&dir, Path::new("lib.nr")); + let file_id = fm + .add_file_with_source(lib_nr_path.as_path(), "fn foo() {}".to_string()) + .expect("could not add file to file manager and obtain a FileId"); + + // Create a sub directory + // we now have: + // - dir/lib.nr + // - dir/sub_dir + let sub_dir = TempDir::new_in(&dir).unwrap(); + let sub_dir_name = sub_dir.path().file_name().unwrap().to_str().unwrap(); + + // Add foo.nr to the subdirectory + // we now have: + // - dir/lib.nr + // - dir/sub_dir/foo.nr + let foo_nr_path = create_dummy_file(&sub_dir, Path::new("foo.nr")); + fm.add_file_with_source(foo_nr_path.as_path(), "fn foo() {}".to_string()); + + // Add a parent module for the sub_dir + // we now have: + // - dir/lib.nr + // - dir/sub_dir.nr + // - dir/sub_dir/foo.nr + let sub_dir_nr_path = create_dummy_file(&dir, Path::new(&format!("{sub_dir_name}.nr"))); + fm.add_file_with_source(sub_dir_nr_path.as_path(), "fn foo() {}".to_string()); + + // First check for the sub_dir.nr file and add it to the FileManager + let sub_dir_file_id = find_module(&fm, file_id, sub_dir_name).unwrap(); + + // Now check for files in its subdirectory + find_module(&fm, sub_dir_file_id, "foo").unwrap(); + } +} diff --git a/compiler/noirc_frontend/src/hir/def_map/mod.rs b/compiler/noirc_frontend/src/hir/def_map/mod.rs index 026f407981d..20f05532ce4 100644 --- a/compiler/noirc_frontend/src/hir/def_map/mod.rs +++ b/compiler/noirc_frontend/src/hir/def_map/mod.rs @@ -270,8 +270,8 @@ pub struct Contract { /// Given a FileId, fetch the File, from the FileManager and parse it's content pub fn parse_file(fm: &FileManager, file_id: FileId) -> (ParsedModule, Vec) { - let file = fm.fetch_file(file_id); - parse_program(file.source()) + let file_source = fm.fetch_file(file_id); + parse_program(file_source) } impl std::ops::Index for CrateDefMap { diff --git a/compiler/noirc_frontend/src/hir/mod.rs b/compiler/noirc_frontend/src/hir/mod.rs index 3a435f302af..adeca7cf2ba 100644 --- a/compiler/noirc_frontend/src/hir/mod.rs +++ b/compiler/noirc_frontend/src/hir/mod.rs @@ -36,7 +36,8 @@ pub enum FunctionNameMatch<'a> { } impl Context { - pub fn new(file_manager: FileManager, crate_graph: CrateGraph) -> Context { + pub fn new(file_manager: FileManager) -> Context { + let crate_graph = CrateGraph::default(); Context { def_interner: NodeInterner::default(), def_maps: BTreeMap::new(), diff --git a/compiler/noirc_frontend/src/monomorphization/mod.rs b/compiler/noirc_frontend/src/monomorphization/mod.rs index 78cde11593b..52b8d5bfd79 100644 --- a/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -92,6 +92,7 @@ type HirType = crate::Type; /// this function. Typically, this is the function named "main" in the source project, /// but it can also be, for example, an arbitrary test function for running `nargo test`. pub fn monomorphize(main: node_interner::FuncId, interner: &NodeInterner) -> Program { + log::trace!("Start monomorphization"); let mut monomorphizer = Monomorphizer::new(interner); let function_sig = monomorphizer.compile_main(main); @@ -106,6 +107,8 @@ pub fn monomorphize(main: node_interner::FuncId, interner: &NodeInterner) -> Pro let functions = vecmap(monomorphizer.finished_functions, |(_, f)| f); let FuncMeta { return_distinctness, return_visibility, ..
} = interner.function_meta(&main); + + log::trace!("Finish monomorphization"); Program::new( functions, function_sig, diff --git a/compiler/noirc_frontend/src/parser/parser.rs b/compiler/noirc_frontend/src/parser/parser.rs index a97637642af..660c85759b9 100644 --- a/compiler/noirc_frontend/src/parser/parser.rs +++ b/compiler/noirc_frontend/src/parser/parser.rs @@ -1119,14 +1119,7 @@ fn int_type() -> impl NoirParser { Err(ParserError::expected_label(ParsingRuleLabel::IntegerType, unexpected, span)) } })) - .validate(|(_, token), span, emit| { - let typ = UnresolvedTypeData::from_int_token(token).with_span(span); - if let UnresolvedTypeData::Integer(crate::Signedness::Signed, _) = &typ.typ { - let reason = ParserErrorReason::ExperimentalFeature("Signed integer types"); - emit(ParserError::with_reason(reason, span)); - } - typ - }) + .map_with_span(|(_, token), span| UnresolvedTypeData::from_int_token(token).with_span(span)) } fn named_type(type_parser: impl NoirParser) -> impl NoirParser { diff --git a/compiler/noirc_frontend/src/tests.rs b/compiler/noirc_frontend/src/tests.rs index 3f4755aa0ef..60498b22c96 100644 --- a/compiler/noirc_frontend/src/tests.rs +++ b/compiler/noirc_frontend/src/tests.rs @@ -53,8 +53,7 @@ mod test { ) -> (ParsedModule, Context, Vec<(CompilationError, FileId)>) { let root = std::path::Path::new("/"); let fm = FileManager::new(root); - let graph = CrateGraph::default(); - let mut context = Context::new(fm, graph); + let mut context = Context::new(fm); let root_file_id = FileId::dummy(); let root_crate_id = context.crate_graph.add_crate_root(root_file_id); let (program, parser_errors) = parse_program(src); diff --git a/compiler/source-resolver/lib-node/index.js b/compiler/source-resolver/lib-node/index.js new file mode 100644 index 00000000000..7de637b6853 --- /dev/null +++ b/compiler/source-resolver/lib-node/index.js @@ -0,0 +1,32 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.initializeResolver = exports.read_file = void 0; +let resolveFunction = null; +const read_file = function (source_id) { + if (resolveFunction) { + const result = resolveFunction(source_id); + if (typeof result === 'string') { + return result; + } + else { + throw new Error('Noir source resolver function MUST return String synchronously. Are you trying to return anything else, eg. `Promise`?'); + } + } + else { + throw new Error('Not yet initialized. 
Use initializeResolver(() => string)'); + } +}; +exports.read_file = read_file; +function initialize(noir_resolver) { + if (typeof noir_resolver === 'function') { + return noir_resolver; + } + else { + throw new Error('Provided Noir Resolver is not a function, hint: use function(module_id) => NoirSource as second parameter'); + } +} +function initializeResolver(resolver) { + resolveFunction = initialize(resolver); +} +exports.initializeResolver = initializeResolver; +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/compiler/source-resolver/lib-node/index.js.map b/compiler/source-resolver/lib-node/index.js.map new file mode 100644 index 00000000000..4ac7301ddc9 --- /dev/null +++ b/compiler/source-resolver/lib-node/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";;;AAAA,IAAI,eAAe,GAA2C,IAAI,CAAC;AAE5D,MAAM,SAAS,GAAG,UAAU,SAAiB;IAClD,IAAI,eAAe,EAAE;QACnB,MAAM,MAAM,GAAG,eAAe,CAAC,SAAS,CAAC,CAAC;QAE1C,IAAI,OAAO,MAAM,KAAK,QAAQ,EAAE;YAC9B,OAAO,MAAM,CAAC;SACf;aAAM;YACL,MAAM,IAAI,KAAK,CACb,wHAAwH,CACzH,CAAC;SACH;KACF;SAAM;QACL,MAAM,IAAI,KAAK,CAAC,2DAA2D,CAAC,CAAC;KAC9E;AACH,CAAC,CAAC;AAdW,QAAA,SAAS,aAcpB;AAEF,SAAS,UAAU,CAAC,aAA4C;IAC9D,IAAI,OAAO,aAAa,KAAK,UAAU,EAAE;QACvC,OAAO,aAAa,CAAC;KACtB;SAAM;QACL,MAAM,IAAI,KAAK,CACb,2GAA2G,CAC5G,CAAC;KACH;AACH,CAAC;AAED,SAAgB,kBAAkB,CAAC,QAAuC;IACxE,eAAe,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;AACzC,CAAC;AAFD,gDAEC"} \ No newline at end of file diff --git a/compiler/source-resolver/lib-node/index_node.js b/compiler/source-resolver/lib-node/index_node.js new file mode 100644 index 00000000000..7d54737ce49 --- /dev/null +++ b/compiler/source-resolver/lib-node/index_node.js @@ -0,0 +1,20 @@ +"use strict"; +/// +Object.defineProperty(exports, "__esModule", { value: true }); +exports.read_file = exports.initializeResolver = void 0; +const index_js_1 = require("./index.js"); +Object.defineProperty(exports, "initializeResolver", { enumerable: true, get: function () { return index_js_1.initializeResolver; } }); +Object.defineProperty(exports, "read_file", { enumerable: true, get: function () { return index_js_1.read_file; } }); +(0, index_js_1.initializeResolver)((source_id) => { + let fileContent = ''; + try { + // eslint-disable-next-line @typescript-eslint/no-var-requires + const fs = require('fs'); + fileContent = fs.readFileSync(source_id, { encoding: 'utf8' }); + } + catch (e) { + console.log(e); + } + return fileContent; +}); +//# sourceMappingURL=index_node.js.map \ No newline at end of file diff --git a/compiler/source-resolver/lib-node/index_node.js.map b/compiler/source-resolver/lib-node/index_node.js.map new file mode 100644 index 00000000000..920818232c3 --- /dev/null +++ b/compiler/source-resolver/lib-node/index_node.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index_node.js","sourceRoot":"","sources":["../src/index_node.ts"],"names":[],"mappings":";AAAA,8BAA8B;;;AAE9B,yCAA2D;AAclD,mGAdA,6BAAkB,OAcA;AAAE,0FAdA,oBAAS,OAcA;AAZtC,IAAA,6BAAkB,EAAC,CAAC,SAAiB,EAAE,EAAE;IACvC,IAAI,WAAW,GAAG,EAAE,CAAC;IACrB,IAAI;QACF,8DAA8D;QAC9D,MAAM,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;QACzB,WAAW,GAAG,EAAE,CAAC,YAAY,CAAC,SAAS,EAAE,EAAE,QAAQ,EAAE,MAAM,EAAE,CAAW,CAAC;KAC1E;IAAC,OAAO,CAAC,EAAE;QACV,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;KAChB;IACD,OAAO,WAAW,CAAC;AACrB,CAAC,CAAC,CAAC"} \ No newline at end of file diff --git a/compiler/source-resolver/lib/index.js b/compiler/source-resolver/lib/index.js new file mode 100644 index 00000000000..079b4460592 --- /dev/null +++ 
b/compiler/source-resolver/lib/index.js @@ -0,0 +1,27 @@ +let resolveFunction = null; +export const read_file = function (source_id) { + if (resolveFunction) { + const result = resolveFunction(source_id); + if (typeof result === 'string') { + return result; + } + else { + throw new Error('Noir source resolver function MUST return String synchronously. Are you trying to return anything else, eg. `Promise`?'); + } + } + else { + throw new Error('Not yet initialized. Use initializeResolver(() => string)'); + } +}; +function initialize(noir_resolver) { + if (typeof noir_resolver === 'function') { + return noir_resolver; + } + else { + throw new Error('Provided Noir Resolver is not a function, hint: use function(module_id) => NoirSource as second parameter'); + } +} +export function initializeResolver(resolver) { + resolveFunction = initialize(resolver); +} +//# sourceMappingURL=index.js.map \ No newline at end of file diff --git a/compiler/source-resolver/lib/index.js.map b/compiler/source-resolver/lib/index.js.map new file mode 100644 index 00000000000..e62ae1e4c8a --- /dev/null +++ b/compiler/source-resolver/lib/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,IAAI,eAAe,GAA2C,IAAI,CAAC;AAEnE,MAAM,CAAC,MAAM,SAAS,GAAG,UAAU,SAAiB;IAClD,IAAI,eAAe,EAAE;QACnB,MAAM,MAAM,GAAG,eAAe,CAAC,SAAS,CAAC,CAAC;QAE1C,IAAI,OAAO,MAAM,KAAK,QAAQ,EAAE;YAC9B,OAAO,MAAM,CAAC;SACf;aAAM;YACL,MAAM,IAAI,KAAK,CACb,wHAAwH,CACzH,CAAC;SACH;KACF;SAAM;QACL,MAAM,IAAI,KAAK,CAAC,2DAA2D,CAAC,CAAC;KAC9E;AACH,CAAC,CAAC;AAEF,SAAS,UAAU,CAAC,aAA4C;IAC9D,IAAI,OAAO,aAAa,KAAK,UAAU,EAAE;QACvC,OAAO,aAAa,CAAC;KACtB;SAAM;QACL,MAAM,IAAI,KAAK,CACb,2GAA2G,CAC5G,CAAC;KACH;AACH,CAAC;AAED,MAAM,UAAU,kBAAkB,CAAC,QAAuC;IACxE,eAAe,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;AACzC,CAAC"} \ No newline at end of file diff --git a/compiler/wasm/package.json b/compiler/wasm/package.json index b57f700b661..352142eb3e3 100644 --- a/compiler/wasm/package.json +++ b/compiler/wasm/package.json @@ -3,7 +3,7 @@ "collaborators": [ "The Noir Team " ], - "version": "0.20.0", + "version": "0.22.0", "license": "(MIT OR Apache-2.0)", "main": "./nodejs/noir_wasm.js", "types": "./web/noir_wasm.d.ts", diff --git a/compiler/wasm/src/compile.rs b/compiler/wasm/src/compile.rs index 13b366819b0..4012effd947 100644 --- a/compiler/wasm/src/compile.rs +++ b/compiler/wasm/src/compile.rs @@ -11,7 +11,7 @@ use noirc_driver::{ CompiledContract, CompiledProgram, NOIR_ARTIFACT_VERSION_STRING, }; use noirc_frontend::{ - graph::{CrateGraph, CrateId, CrateName}, + graph::{CrateId, CrateName}, hir::Context, }; use serde::Deserialize; @@ -20,8 +20,6 @@ use wasm_bindgen::prelude::*; use crate::errors::{CompileError, JsCompileError}; -const BACKEND_IDENTIFIER: &str = "acvm-backend-barretenberg"; - #[wasm_bindgen(typescript_custom_section)] const DEPENDENCY_GRAPH: &'static str = r#" export type DependencyGraph = { @@ -32,14 +30,12 @@ export type DependencyGraph = { export type CompiledContract = { noir_version: string; name: string; - backend: string; functions: Array; events: Array; }; export type CompiledProgram = { noir_version: string; - backend: string; abi: any; bytecode: string; } @@ -120,12 +116,11 @@ impl JsCompileResult { } } -#[derive(Deserialize)] -struct DependencyGraph { - root_dependencies: Vec, - library_dependencies: HashMap>, +#[derive(Deserialize, Default)] +pub(crate) struct DependencyGraph { + pub(crate) root_dependencies: Vec, + pub(crate) library_dependencies: HashMap>, } - #[wasm_bindgen] // This is a 
map containing the paths of all of the files in the entry-point crate and // the transitive dependencies of the entry-point crate. @@ -133,7 +128,7 @@ struct DependencyGraph { // This is for all intents and purposes the file system that the compiler will use to resolve/compile // files in the crate being compiled and its dependencies. #[derive(Deserialize, Default)] -pub struct PathToFileSourceMap(HashMap); +pub struct PathToFileSourceMap(pub(crate) HashMap); #[wasm_bindgen] impl PathToFileSourceMap { @@ -174,8 +169,7 @@ pub fn compile( let fm = file_manager_with_source_map(file_source_map); - let graph = CrateGraph::default(); - let mut context = Context::new(fm, graph); + let mut context = Context::new(fm); let path = Path::new(&entry_point); let crate_id = prepare_crate(&mut context, path); @@ -184,10 +178,8 @@ pub fn compile( let compile_options = CompileOptions::default(); - // For now we default to plonk width = 3, though we can add it as a parameter - let np_language = acvm::Language::PLONKCSat { width: 3 }; - #[allow(deprecated)] - let is_opcode_supported = acvm::pwg::default_is_opcode_supported(np_language); + // For now we default to a bounded width of 3, though we can add it as a parameter + let expression_width = acvm::ExpressionWidth::Bounded { width: 3 }; if contracts.unwrap_or_default() { let compiled_contract = compile_contract(&mut context, crate_id, &compile_options) @@ -200,9 +192,7 @@ pub fn compile( })? .0; - let optimized_contract = - nargo::ops::optimize_contract(compiled_contract, np_language, &is_opcode_supported) - .expect("Contract optimization failed"); + let optimized_contract = nargo::ops::optimize_contract(compiled_contract, expression_width); let compile_output = preprocess_contract(optimized_contract); Ok(JsCompileResult::new(compile_output)) @@ -217,9 +207,7 @@ pub fn compile( })? .0; - let optimized_program = - nargo::ops::optimize_program(compiled_program, np_language, &is_opcode_supported) - .expect("Program optimization failed"); + let optimized_program = nargo::ops::optimize_program(compiled_program, expression_width); let compile_output = preprocess_program(optimized_program); Ok(JsCompileResult::new(compile_output)) @@ -234,7 +222,7 @@ pub fn compile( // // For all intents and purposes, the file manager being returned // should be considered as immutable. 
-fn file_manager_with_source_map(source_map: PathToFileSourceMap) -> FileManager { +pub(crate) fn file_manager_with_source_map(source_map: PathToFileSourceMap) -> FileManager { let root = Path::new(""); let mut fm = FileManager::new(root); @@ -283,7 +271,7 @@ fn add_noir_lib(context: &mut Context, library_name: &CrateName) -> CrateId { prepare_dependency(context, &path_to_lib) } -fn preprocess_program(program: CompiledProgram) -> CompileResult { +pub(crate) fn preprocess_program(program: CompiledProgram) -> CompileResult { let debug_artifact = DebugArtifact { debug_symbols: vec![program.debug], file_map: program.file_map, @@ -292,7 +280,6 @@ fn preprocess_program(program: CompiledProgram) -> CompileResult { let preprocessed_program = PreprocessedProgram { hash: program.hash, - backend: String::from(BACKEND_IDENTIFIER), abi: program.abi, noir_version: NOIR_ARTIFACT_VERSION_STRING.to_string(), bytecode: program.circuit, @@ -301,7 +288,8 @@ fn preprocess_program(program: CompiledProgram) -> CompileResult { CompileResult::Program { program: preprocessed_program, debug: debug_artifact } } -fn preprocess_contract(contract: CompiledContract) -> CompileResult { +// TODO: This method should not be doing so much, most of this should be done in nargo or the driver +pub(crate) fn preprocess_contract(contract: CompiledContract) -> CompileResult { let debug_artifact = DebugArtifact { debug_symbols: contract.functions.iter().map(|function| function.debug.clone()).collect(), file_map: contract.file_map, @@ -322,7 +310,6 @@ fn preprocess_contract(contract: CompiledContract) -> CompileResult { let preprocessed_contract = PreprocessedContract { noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), name: contract.name, - backend: String::from(BACKEND_IDENTIFIER), functions: preprocessed_functions, events: contract.events, }; @@ -333,10 +320,7 @@ fn preprocess_contract(contract: CompiledContract) -> CompileResult { #[cfg(test)] mod test { use noirc_driver::prepare_crate; - use noirc_frontend::{ - graph::{CrateGraph, CrateName}, - hir::Context, - }; + use noirc_frontend::{graph::CrateName, hir::Context}; use crate::compile::PathToFileSourceMap; @@ -348,8 +332,7 @@ mod test { // Add this due to us calling prepare_crate on "/main.nr" below fm.add_file_with_source(Path::new("/main.nr"), "fn foo() {}".to_string()); - let graph = CrateGraph::default(); - let mut context = Context::new(fm, graph); + let mut context = Context::new(fm); prepare_crate(&mut context, Path::new("/main.nr")); context diff --git a/compiler/wasm/src/compile_new.rs b/compiler/wasm/src/compile_new.rs new file mode 100644 index 00000000000..cd09d0fcc49 --- /dev/null +++ b/compiler/wasm/src/compile_new.rs @@ -0,0 +1,334 @@ +use crate::compile::{ + file_manager_with_source_map, preprocess_contract, preprocess_program, JsCompileResult, + PathToFileSourceMap, +}; +use crate::errors::{CompileError, JsCompileError}; +use noirc_driver::{ + add_dep, compile_contract, compile_main, prepare_crate, prepare_dependency, CompileOptions, +}; +use noirc_frontend::{ + graph::{CrateId, CrateName}, + hir::Context, +}; +use std::path::Path; +use wasm_bindgen::prelude::wasm_bindgen; + +/// This is a wrapper class that is wasm-bindgen compatible +/// We do not use js_name and rename it like CrateId because +/// then the impl block is not picked up in javascript. 
+#[wasm_bindgen] +pub struct CompilerContext { + context: Context, +} + +#[wasm_bindgen(js_name = "CrateId")] +#[derive(Debug, Copy, Clone)] +pub struct CrateIDWrapper(CrateId); + +#[wasm_bindgen] +impl CompilerContext { + #[wasm_bindgen(constructor)] + pub fn new(source_map: PathToFileSourceMap) -> CompilerContext { + console_error_panic_hook::set_once(); + + let fm = file_manager_with_source_map(source_map); + CompilerContext { context: Context::new(fm) } + } + + #[cfg(test)] + pub(crate) fn crate_graph(&self) -> &noirc_frontend::graph::CrateGraph { + &self.context.crate_graph + } + #[cfg(test)] + pub(crate) fn root_crate_id(&self) -> CrateIDWrapper { + CrateIDWrapper(*self.context.root_crate_id()) + } + + // Processes the root crate by adding it to the package graph and automatically + // importing the stdlib as a dependency for it. + // + // Its ID in the package graph is returned + pub fn process_root_crate(&mut self, path_to_crate: String) -> CrateIDWrapper { + let path_to_crate = Path::new(&path_to_crate); + + // Adds the root crate to the crate graph and returns its crate id + CrateIDWrapper(prepare_crate(&mut self.context, path_to_crate)) + } + + pub fn process_dependency_crate(&mut self, path_to_crate: String) -> CrateIDWrapper { + let path_to_crate = Path::new(&path_to_crate); + + // Adds the dependency crate to the crate graph and returns its crate id + CrateIDWrapper(prepare_dependency(&mut self.context, path_to_crate)) + } + + // Adds a named edge from one crate to the other. + // + // For example, let's say we have two crates CrateId1 and CrateId2 + // This function will add an edge from CrateId1 to CrateId2 and the edge will be named `crate_name` + // + // This essentially says that CrateId1 depends on CrateId2 and the dependency is named `crate_name` + // + // We pass references to &CrateIdWrapper even though it is a copy because Rust's move semantics are + // not respected once we use javascript, i.e. it will actually allocate a new object in javascript + // and then deallocate that object if we do not pass it as a reference. + pub fn add_dependency_edge( + &mut self, + crate_name: String, + from: &CrateIDWrapper, + to: &CrateIDWrapper, + ) { + let parsed_crate_name: CrateName = crate_name + .parse() + .unwrap_or_else(|_| panic!("Failed to parse crate name {}", crate_name)); + add_dep(&mut self.context, from.0, to.0, parsed_crate_name); + } + + pub fn compile_program( + mut self, + program_width: usize, + ) -> Result { + let compile_options = CompileOptions::default(); + let np_language = acvm::ExpressionWidth::Bounded { width: program_width }; + + let root_crate_id = *self.context.root_crate_id(); + + let compiled_program = + compile_main(&mut self.context, root_crate_id, &compile_options, None, true) + .map_err(|errs| { + CompileError::with_file_diagnostics( + "Failed to compile program", + errs, + &self.context.file_manager, + ) + })?
+ .0; + + let optimized_program = nargo::ops::optimize_program(compiled_program, np_language); + + let compile_output = preprocess_program(optimized_program); + Ok(JsCompileResult::new(compile_output)) + } + + pub fn compile_contract( + mut self, + program_width: usize, + ) -> Result { + let compile_options = CompileOptions::default(); + let np_language = acvm::ExpressionWidth::Bounded { width: program_width }; + let root_crate_id = *self.context.root_crate_id(); + + let compiled_contract = + compile_contract(&mut self.context, root_crate_id, &compile_options) + .map_err(|errs| { + CompileError::with_file_diagnostics( + "Failed to compile contract", + errs, + &self.context.file_manager, + ) + })? + .0; + + let optimized_contract = nargo::ops::optimize_contract(compiled_contract, np_language); + + let compile_output = preprocess_contract(optimized_contract); + Ok(JsCompileResult::new(compile_output)) + } +} + +/// This is a function that exposes the same API as `compile`, +/// but uses the Context-based API internally +#[wasm_bindgen] +pub fn compile_( + entry_point: String, + contracts: Option, + dependency_graph: Option, + file_source_map: PathToFileSourceMap, +) -> Result { + use std::collections::HashMap; + + console_error_panic_hook::set_once(); + + let dependency_graph: crate::compile::DependencyGraph = + if let Some(dependency_graph) = dependency_graph { + ::into_serde( + &wasm_bindgen::JsValue::from(dependency_graph), + ) + .map_err(|err| err.to_string())? + } else { + crate::compile::DependencyGraph::default() + }; + + let mut compiler_context = CompilerContext::new(file_source_map); + + // Set the root crate + let root_id = compiler_context.process_root_crate(entry_point.clone()); + + let add_noir_lib = |context: &mut CompilerContext, lib_name: &CrateName| -> CrateIDWrapper { + let lib_name_string = lib_name.to_string(); + let path_to_lib = Path::new(&lib_name_string) + .join("lib.nr") + .to_str() + .expect("paths are expected to be valid utf-8") + .to_string(); + context.process_dependency_crate(path_to_lib) + }; + + // Add the dependency graph + let mut crate_names: HashMap = HashMap::new(); + // + // Process the direct dependencies of the root + for lib_name in dependency_graph.root_dependencies { + let lib_name_string = lib_name.to_string(); + + let crate_id = add_noir_lib(&mut compiler_context, &lib_name); + + crate_names.insert(lib_name.clone(), crate_id); + + // Add the dependency edges + compiler_context.add_dependency_edge(lib_name_string, &root_id, &crate_id); + } + + // Process the transitive dependencies of the root + for (lib_name, dependencies) in &dependency_graph.library_dependencies { + // first create the library crate if needed + // this crate might not have been registered yet because of the order of the HashMap + // e.g. 
{root: [lib1], libs: { lib2 -> [lib3], lib1 -> [lib2] }} + let crate_id = *crate_names + .entry(lib_name.clone()) + .or_insert_with(|| add_noir_lib(&mut compiler_context, lib_name)); + + for dependency_name in dependencies { + let dependency_name_string = dependency_name.to_string(); + + let dep_crate_id = crate_names + .entry(dependency_name.clone()) + .or_insert_with(|| add_noir_lib(&mut compiler_context, dependency_name)); + + compiler_context.add_dependency_edge(dependency_name_string, &crate_id, dep_crate_id); + } + } + + let is_contract = contracts.unwrap_or(false); + let program_width = 3; + + if is_contract { + compiler_context.compile_contract(program_width) + } else { + compiler_context.compile_program(program_width) + } +} + +#[cfg(test)] +mod test { + use noirc_driver::prepare_crate; + use noirc_frontend::hir::Context; + + use crate::compile::{file_manager_with_source_map, PathToFileSourceMap}; + + use std::path::Path; + + use super::CompilerContext; + + fn setup_test_context(source_map: PathToFileSourceMap) -> CompilerContext { + let mut fm = file_manager_with_source_map(source_map); + // Add this due to us calling prepare_crate on "/main.nr" below + fm.add_file_with_source(Path::new("/main.nr"), "fn foo() {}".to_string()); + + let mut context = Context::new(fm); + prepare_crate(&mut context, Path::new("/main.nr")); + + CompilerContext { context } + } + + #[test] + fn test_works_with_empty_dependency_graph() { + let source_map = PathToFileSourceMap::default(); + let context = setup_test_context(source_map); + + // one stdlib + one root crate + assert_eq!(context.crate_graph().number_of_crates(), 2); + } + + #[test] + fn test_works_with_root_dependencies() { + let source_map = PathToFileSourceMap( + vec![(Path::new("lib1/lib.nr").to_path_buf(), "fn foo() {}".to_string())] + .into_iter() + .collect(), + ); + + let mut context = setup_test_context(source_map); + context.process_dependency_crate("lib1/lib.nr".to_string()); + + assert_eq!(context.crate_graph().number_of_crates(), 3); + } + + #[test] + fn test_works_with_duplicate_root_dependencies() { + let source_map = PathToFileSourceMap( + vec![(Path::new("lib1/lib.nr").to_path_buf(), "fn foo() {}".to_string())] + .into_iter() + .collect(), + ); + let mut context = setup_test_context(source_map); + + let lib1_crate_id = context.process_dependency_crate("lib1/lib.nr".to_string()); + let root_crate_id = context.root_crate_id(); + + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); + + assert_eq!(context.crate_graph().number_of_crates(), 3); + } + + #[test] + fn test_works_with_transitive_dependencies() { + let source_map = PathToFileSourceMap( + vec![ + (Path::new("lib1/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + (Path::new("lib2/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + (Path::new("lib3/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + ] + .into_iter() + .collect(), + ); + + let mut context = setup_test_context(source_map); + + let lib1_crate_id = context.process_dependency_crate("lib1/lib.nr".to_string()); + let lib2_crate_id = context.process_dependency_crate("lib2/lib.nr".to_string()); + let lib3_crate_id = context.process_dependency_crate("lib3/lib.nr".to_string()); + let root_crate_id = context.root_crate_id(); + + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); + context.add_dependency_edge("lib2".to_string(), &lib1_crate_id, &lib2_crate_id); + 
context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id); + + assert_eq!(context.crate_graph().number_of_crates(), 5); + } + + #[test] + fn test_works_with_missing_dependencies() { + let source_map = PathToFileSourceMap( + vec![ + (Path::new("lib1/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + (Path::new("lib2/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + (Path::new("lib3/lib.nr").to_path_buf(), "fn foo() {}".to_string()), + ] + .into_iter() + .collect(), + ); + let mut context = setup_test_context(source_map); + + let lib1_crate_id = context.process_dependency_crate("lib1/lib.nr".to_string()); + let lib2_crate_id = context.process_dependency_crate("lib2/lib.nr".to_string()); + let lib3_crate_id = context.process_dependency_crate("lib3/lib.nr".to_string()); + let root_crate_id = context.root_crate_id(); + + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); + context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id); + + assert_eq!(context.crate_graph().number_of_crates(), 5); + } +} diff --git a/compiler/wasm/src/lib.rs b/compiler/wasm/src/lib.rs index 9f2f558f85c..43095fee4d4 100644 --- a/compiler/wasm/src/lib.rs +++ b/compiler/wasm/src/lib.rs @@ -14,11 +14,15 @@ use wasm_bindgen::prelude::*; mod circuit; mod compile; +mod compile_new; mod errors; pub use circuit::{acir_read_bytes, acir_write_bytes}; pub use compile::compile; +// Expose the new Context-Centric API +pub use compile_new::{compile_, CompilerContext, CrateIDWrapper}; + #[derive(Serialize, Deserialize)] pub struct BuildInfo { git_hash: &'static str, diff --git a/compiler/wasm/test/browser/index.test.ts b/compiler/wasm/test/browser/index.test.ts index 346c20c834c..7364a8a4d11 100644 --- a/compiler/wasm/test/browser/index.test.ts +++ b/compiler/wasm/test/browser/index.test.ts @@ -39,9 +39,9 @@ describe('noir wasm', () => { } // We don't expect the hashes to match due to how `noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); - expect(wasmCircuit.program.backend).to.eq(cliCircuit.backend); }).timeout(20e3); // 20 seconds }); @@ -77,9 +77,9 @@ describe('noir wasm', () => { const cliCircuit = await getPrecompiledSource(depsScriptExpectedArtifact); // We don't expect the hashes to match due to how `noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); - expect(wasmCircuit.program.backend).to.eq(cliCircuit.backend); }).timeout(20e3); // 20 seconds }); }); diff --git a/compiler/wasm/test/node/index.test.ts b/compiler/wasm/test/node/index.test.ts index 5cf9e3be2df..57c1b459d60 100644 --- a/compiler/wasm/test/node/index.test.ts +++ b/compiler/wasm/test/node/index.test.ts @@ -9,7 +9,7 @@ import { } from '../shared'; import { readFileSync } from 'node:fs'; import { join, resolve } from 'node:path'; -import { compile, PathToFileSourceMap } from '@noir-lang/noir_wasm'; +import { compile, compile_, CompilerContext, PathToFileSourceMap } from '@noir-lang/noir_wasm'; // eslint-disable-next-line @typescript-eslint/no-explicit-any async function getPrecompiledSource(path: string): Promise { @@ -33,9 +33,9 @@ describe('noir wasm compilation', () => { } // We don't expect the hashes to match due to how 
`noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); - expect(wasmCircuit.program.backend).to.eq(cliCircuit.backend); }).timeout(10e3); }); @@ -67,9 +67,83 @@ describe('noir wasm compilation', () => { } // We don't expect the hashes to match due to how `noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); + expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); + expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); + }).timeout(10e3); + }); + + describe('can compile scripts with dependencies -- context-api', () => { + let sourceMap: PathToFileSourceMap; + beforeEach(() => { + sourceMap = new PathToFileSourceMap(); + sourceMap.add_source_code('script/main.nr', readFileSync(join(__dirname, depsScriptSourcePath), 'utf-8')); + sourceMap.add_source_code('lib_a/lib.nr', readFileSync(join(__dirname, libASourcePath), 'utf-8')); + sourceMap.add_source_code('lib_b/lib.nr', readFileSync(join(__dirname, libBSourcePath), 'utf-8')); + }); + + it('matching nargos compilation - context-api', async () => { + const compilerContext = new CompilerContext(sourceMap); + + // Process root crate + const root_crate_id = compilerContext.process_root_crate('script/main.nr'); + // Process dependencies + // + // This can be direct dependencies or transitive dependencies + // I have named these crate_id_1 and crate_id_2 instead of `lib_a_crate_id` and `lib_b_crate_id` + // because the names of crates in a dependency graph are not determined by the actual package. + // + // It is true that each package is given a name, but if I include a `lib_a` as a dependency + // in my library, I do not need to refer to it as `lib_a` in my dependency graph. 
+ // See https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#renaming-dependencies-in-cargotoml + // + // If you have looked at graphs before, then you can think of the dependency graph as a directed acyclic graph (DAG) + const crate_id_1 = compilerContext.process_dependency_crate('lib_a/lib.nr'); + const crate_id_2 = compilerContext.process_dependency_crate('lib_b/lib.nr'); + + // Root crate depends on `crate_id_1` and this edge is called `lib_a` + compilerContext.add_dependency_edge('lib_a', root_crate_id, crate_id_1); + // `crate_id_1` depends on `crate_id_2` and this edge is called `lib_b` + compilerContext.add_dependency_edge('lib_b', crate_id_1, crate_id_2); + + const program_width = 3; + const wasmCircuit = await compilerContext.compile_program(program_width); + + const cliCircuit = await getPrecompiledSource(depsScriptExpectedArtifact); + + if (!('program' in wasmCircuit)) { + throw Error('Expected program to be present'); + } + + // We don't expect the hashes to match due to how `noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); + expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); + expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); + }).timeout(10e3); + + it('matching nargos compilation - context-implementation-compile-api', async () => { + const wasmCircuit = await compile_( + 'script/main.nr', + false, + { + root_dependencies: ['lib_a'], + library_dependencies: { + lib_a: ['lib_b'], + }, + }, + sourceMap, + ); + + const cliCircuit = await getPrecompiledSource(depsScriptExpectedArtifact); + + if (!('program' in wasmCircuit)) { + throw Error('Expected program to be present'); + } + + // We don't expect the hashes to match due to how `noir_wasm` handles dependencies + expect(wasmCircuit.program.noir_version).to.eq(cliCircuit.noir_version); expect(wasmCircuit.program.bytecode).to.eq(cliCircuit.bytecode); expect(wasmCircuit.program.abi).to.deep.eq(cliCircuit.abi); - expect(wasmCircuit.program.backend).to.eq(cliCircuit.backend); }).timeout(10e3); }); }); diff --git a/docs/.gitignore b/docs/.gitignore index e4abc8785c7..4f6eee8284e 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -22,3 +22,4 @@ yarn-debug.log* yarn-error.log* package-lock.json +versions.json diff --git a/docs/docs/explainers/explainer-recursion.md b/docs/docs/explainers/explainer-recursion.md new file mode 100644 index 00000000000..cc431a878dc --- /dev/null +++ b/docs/docs/explainers/explainer-recursion.md @@ -0,0 +1,175 @@ +--- +title: Recursive proofs +description: Explore the concept of recursive proofs in Zero-Knowledge programming. Understand how recursion works in Noir, a language for writing smart contracts on the EVM blockchain. Learn through practical examples like Alice and Bob's guessing game, Charlie's recursive merkle tree, and Daniel's reusable components. Discover how to use recursive proofs to optimize computational resources and improve efficiency. + +keywords: + [ + "Recursive Proofs", + "Zero-Knowledge Programming", + "Noir", + "EVM Blockchain", + "Smart Contracts", + "Recursion in Noir", + "Alice and Bob Guessing Game", + "Recursive Merkle Tree", + "Reusable Components", + "Optimizing Computational Resources", + "Improving Efficiency", + "Verification Key", + "Aggregation Objects", + "Recursive zkSNARK schemes", + "PLONK", + "Proving and Verification Keys" + ] +sidebar_position: 1 +--- + +In programming, we tend to think of recursion as something calling itself. 
A classic example would be the calculation of the factorial of a number: + +```js +function factorial(n) { + if (n === 0 || n === 1) { + return 1; + } else { + return n * factorial(n - 1); + } +} +``` + +In this case, while `n` is not `1`, this function will keep calling itself until it hits the base case, bubbling up the result on the call stack: + +```md + Is `n` 1? <--------- + /\ / + / \ n = n -1 + / \ / + Yes No -------- +``` + +In Zero-Knowledge, recursion has some similarities. + +It is not a Noir function calling itself, but a proof being used as an input to another circuit. In short, you verify one proof *inside* another proof, returning the proof that both proofs are valid. + +This means that, given enough computational resources, you can prove the correctness of any arbitrary number of proofs in a single proof. This could be useful to design state channels (for which a common example would be [Bitcoin's Lightning Network](https://en.wikipedia.org/wiki/Lightning_Network)), to save on gas costs by settling one proof on-chain, or simply to make business logic less dependent on a consensus mechanism. + +## Examples + +Let us look at some of these examples: + +### Alice and Bob - Guessing game + +Alice and Bob are friends, and they like guessing games. They want to play a guessing game online, but for that, they need a trusted third party that knows both of their secrets and finishes the game once someone wins. + +So, they use zero-knowledge proofs. Alice tries to guess Bob's number, and Bob will generate a ZK proof stating whether she succeeded or failed. + +This ZK proof can go on a smart contract, revealing the winner and even giving prizes. However, this means every turn needs to be verified on-chain. This incurs some cost and waiting time that may simply make the game too expensive or time-consuming to be worth it. + +So, Alice started thinking: "what if Bob generates his proof, and instead of sending it on-chain, I verify it *within* my own proof before playing my own turn?". She can then generate a proof that she verified his proof, and so on. + +```md + Did you fail? <-------------------------- + / \ / + / \ n = n -1 + / \ / + Yes No / + | | / + | | / + | You win / + | / + | / +Generate proof of that / + + / + my own guess ---------------- +``` + +### Charlie - Recursive merkle tree + +Charlie is a concerned citizen, and wants to be sure his vote in an election is accounted for. He votes with a ZK proof, but he has no way of knowing that his ZK proof was included in the total vote count! + +So, the tallier puts all the votes in a merkle tree, and everyone can also prove the verification of two proofs within one proof, as such: + +```md + abcd + __________|______________ + | | + ab cd + _____|_____ ______|______ + | | | | + alice bob charlie daniel +``` + +Doing this recursively allows us to arrive at a final proof `abcd` which, if true, verifies the correctness of all the votes (a minimal sketch of this pairwise aggregation is given at the end of this section). + +### Daniel - Reusable components + +Daniel has a big circuit and a big headache. A part of his circuit is a setup phase that finishes with some assertions that need to be made. But that section alone takes most of the proving time, and is largely independent of the rest of the circuit. + +He could find it more efficient to generate a proof for that setup phase separately, and verify it in his actual business logic section of the circuit. This will allow for parallelization of both proofs, which results in a considerable speedup.
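To make Charlie's pairwise aggregation concrete, here is a minimal JavaScript sketch of the tree-shaped flow. It is only an illustration: the `aggregate` callback is a hypothetical stand-in for proving a circuit that verifies two child proofs, not an actual NoirJS API.

```js
// Pairwise-aggregate an array of proofs until a single root proof remains.
// `aggregate(left, right)` is assumed to return the proof of a circuit that
// verifies both `left` and `right` (e.g. producing `ab` from `alice` and `bob`).
function aggregateAll(proofs, aggregate) {
  let layer = proofs;
  while (layer.length > 1) {
    const next = [];
    for (let i = 0; i < layer.length; i += 2) {
      // A lone proof at the end of an odd-sized layer is carried up unchanged.
      next.push(i + 1 < layer.length ? aggregate(layer[i], layer[i + 1]) : layer[i]);
    }
    layer = next;
  }
  return layer[0]; // the root proof, `abcd` in the diagram above
}
```

The same shape applies to Daniel's case: proofs that are independent of each other can be generated in parallel and only combined at the end.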
+ +## What params do I need + +As you can see in the [recursion reference](noir/standard_library/recursion.md), a simple recursive proof requires: + +- The proof to verify +- The Verification Key of the circuit that generated the proof +- A hash of this verification key, as it's needed for some backends +- The public inputs for the proof +- The input aggregation object + +It also returns the `output aggregation object`. These aggregation objects can be confusing at times, so let's dive in a little bit. + +### Aggregation objects + +Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather, an aggregation object is built over the public inputs. + +In the case of PLONK, the recursive aggregation object is two G1 points (expressed as 16 witness values). The final verifier (in our case this is most often the smart contract verifier) has to be aware of this aggregation object to execute a pairing and check the validity of these points. + +So, taking the example of Alice and Bob and their guessing game: + +- Alice makes her guess. Her proof is *not* recursive: it doesn't verify any proof within it! It's just a standard `assert(x != y)` circuit +- Bob verifies Alice's proof and makes his own guess. In this circuit, he is verifying a proof, so it needs to output an `aggregation object`: he is generating a recursive proof! +- Alice verifies Bob's *recursive proof*, and uses Bob's `output aggregation object` as the `input aggregation object` in her proof... which, in turn, generates another `output aggregation object`. + +Notice that when Bob generates his first proof, he is not verifying a recursive proof, so he has no `input aggregation object`. In this case, he may use zeros instead. + +We can imagine the `aggregation object` as the baton in a [relay race](https://en.wikipedia.org/wiki/Relay_race). The first runner doesn't have to receive the baton from anyone else, as he/she already starts with it. But when his/her turn is over, the next runner needs to receive it, run a bit more, and pass it along. Even though every runner could theoretically verify the baton mid-run (why not? 🏃🔍), only at the end of the race does the referee verify that the whole race is valid. + +## Some architecture + +As with everything in computer science, there's no one-size-fits-all. But there are some patterns that can help with understanding and implementing them. To give three examples: + +### Adding some logic to a proof verification + +This would be an approach for something like our guessing game, where proofs are sent back and forth and are verified by each opponent. This circuit would be divided into two sections: + +- A `recursive verification` section, which would be just the call to `std::verify_proof`, and that would be skipped on the first move (since there's no proof to verify) +- A `guessing` section, which is basically the logic part where the actual guessing happens + +In such a situation, and assuming Alice is first, she would skip the first part and try to guess Bob's number. Bob would then verify her proof in the first section of his run, and try to guess Alice's number in the second part, and so on. + +### Aggregating proofs + +In some one-way interaction situations, recursion would allow for aggregation of simple proofs that don't need to be immediately verified on-chain or elsewhere.
+ +To give a practical example, a barman wouldn't need to verify a "proof-of-age" on-chain every time he serves alcohol to a customer. Instead, the architecture would comprise two circuits: + +- A `main`, non-recursive circuit with some logic +- A `recursive` circuit meant to verify two proofs in one proof + +The customer's proofs would be intermediate, and made on their phones, and the barman could just verify them locally. He would then aggregate them into a final proof sent on-chain (or elsewhere) at the end of the day. + +### Recursively verifying different circuits + +Nothing prevents you from verifying different circuits in a recursive proof, for example: + +- A `circuit1` circuit +- A `circuit2` circuit +- A `recursive` circuit + +In this example, a regulator could verify that taxes were paid for a specific purchase by aggregating both a `payer` circuit (proving that a purchase was made and taxes were paid), and a `receipt` circuit (proving that the payment was received) + +## How fast is it + +At the time of writing, verifying recursive proofs is surprisingly fast. This is because most of the time is spent on generating the verification key that will be used to generate the next proof. So you are able to cache the verification key and reuse it later. + +Currently, Noir JS packages don't expose the functionality of loading proving and verification keys, but that feature exists in the underlying `bb.js` package. diff --git a/docs/docs/explanations/noir/traits.md b/docs/docs/explanations/noir/traits.md index d24deaa84da..7ba07e74f40 100644 --- a/docs/docs/explanations/noir/traits.md +++ b/docs/docs/explanations/noir/traits.md @@ -53,7 +53,7 @@ trait Area { fn area(self) -> Field; } -fn log_area(shape: S) where S: Area { +fn log_area(shape: T) where T: Area { println(shape.area()); } ``` @@ -85,7 +85,7 @@ As seen in `log_area` above, when we want to create a function or method that is a trait, we can add a where clause to the generic function. ```rust -fn log_area(shape: S) where S: Area { +fn log_area(shape: T) where T: Area { println(shape.area()); } ``` @@ -94,7 +94,7 @@ It is also possible to apply multiple trait constraints on the same variable at operator. Similarly, we can have multiple trait constraints by separating each with a comma: ```rust -fn foo(elements: [T], thing: U) where +fn foo(elements: [T], thing: U) where T: Default + Add + Eq, U: Bar, { diff --git a/docs/docs/getting_started/create_a_project.md b/docs/docs/getting_started/create_a_project.md index 76bed5be9b8..f10916c39c5 100644 --- a/docs/docs/getting_started/create_a_project.md +++ b/docs/docs/getting_started/create_a_project.md @@ -69,7 +69,7 @@ x : Field, y : pub Field Program inputs in Noir are private by default (e.g. `x`), but can be labeled public using the keyword `pub` (e.g. `y`). To learn more about private and public values, check the -[Data Types](@site/docs/explanations/noir/data_types/index.md) section. +[Data Types](../noir/syntax/data_types/index.md) section. The next line of the program specifies its body: @@ -79,7 +79,7 @@ assert(x != y); The Noir syntax `assert` can be interpreted as something similar to constraints in other zk-contract languages. -For more Noir syntax, check the [Language Concepts](@site/docs/explanations/noir/comments.md) chapter. +For more Noir syntax, check the [Language Concepts](../noir/syntax/comments.md) chapter. ## Build In/Output Files @@ -139,4 +139,4 @@ corresponding error instead. 
Congratulations, you have now created and verified a proof for your very first Noir program! -In the [next section](@site/docs/getting_started/project_breakdown.md), we will go into more detail on each step performed. +In the [next section](./project_breakdown.md), we will go into more detail on each step performed. diff --git a/docs/docs/getting_started/installation/other_install_methods.md b/docs/docs/getting_started/installation/other_install_methods.md index d90a10103d9..36f05657277 100644 --- a/docs/docs/getting_started/installation/other_install_methods.md +++ b/docs/docs/getting_started/installation/other_install_methods.md @@ -24,7 +24,7 @@ sidebar_position: 1 ## Installation -The most common method of installing Nargo is through [Noirup](@site/docs/getting_started/installation/index.md) +The most common method of installing Nargo is through [Noirup](./index.md) However, there are other methods for installing Nargo: @@ -167,7 +167,7 @@ The default backend for Noir (Barretenberg) doesn't provide Windows binaries at Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL. -step 2: Follow the [Noirup instructions](@site/docs/getting_started/installation/index.md). +step 2: Follow the [Noirup instructions](./index.md). ## Uninstalling Nargo diff --git a/docs/docs/getting_started/project_breakdown.md b/docs/docs/getting_started/project_breakdown.md index 026127b9ed0..5a214804f7b 100644 --- a/docs/docs/getting_started/project_breakdown.md +++ b/docs/docs/getting_started/project_breakdown.md @@ -52,7 +52,7 @@ license = "MIT" ecrecover = {tag = "v0.9.0", git = "https://github.com/colinnielsen/ecrecover-noir.git"} ``` -Nargo.toml for a [workspace](@site/docs/explanations/modules_packages_crates/workspaces.md) will look a bit different. For example: +Nargo.toml for a [workspace](../noir/modules_packages_crates/workspaces.md) will look a bit different. For example: ```toml [workspace] @@ -75,7 +75,7 @@ The package section requires a number of fields including: #### Dependencies section -This is where you will specify any dependencies for your project. See the [Dependencies page](@site/docs/explanations/modules_packages_crates/dependencies.md) for more info. +This is where you will specify any dependencies for your project. See the [Dependencies page](../noir/modules_packages_crates/dependencies.md) for more info. `./proofs/` and `./contract/` directories will not be immediately visible until you create a proof or verifier contract respectively. diff --git a/docs/docs/how_to/how-to-recursion.md b/docs/docs/how_to/how-to-recursion.md new file mode 100644 index 00000000000..226f7e6e73d --- /dev/null +++ b/docs/docs/how_to/how-to-recursion.md @@ -0,0 +1,184 @@ +--- +title: How to use recursion on NoirJS +description: Learn how to implement recursion with NoirJS, a powerful tool for creating smart contracts on the EVM blockchain. This guide assumes familiarity with NoirJS, solidity verifiers, and the Barretenberg proving backend. Discover how to generate both final and intermediate proofs using `noir_js` and `backend_barretenberg`. +keywords: + [ + "NoirJS", + "EVM blockchain", + "smart contracts", + "recursion", + "solidity verifiers", + "Barretenberg backend", + "noir_js", + "backend_barretenberg", + "intermediate proofs", + "final proofs", + "nargo compile", + "json import", + "recursive circuit", + "recursive app" + ] +sidebar_position: 1 +--- + +This guide shows you how to use recursive proofs in your NoirJS app. 
For the sake of clarity, it is assumed that: + +- You already have a NoirJS app. If you don't, please visit the [NoirJS tutorial](../tutorials/noirjs_app.md) and the [reference](../reference/NoirJS/noir_js/index.md). +- You are familiar with what recursive proofs are and you have read the [recursion explainer](../explainers/explainer-recursion.md). +- You already built a recursive circuit following [the reference](../noir/standard_library/recursion.md), and understand how it works. + +It is also assumed that you're not using `noir_wasm` for compilation, and instead you've used [`nargo compile`](../reference/nargo_commands.md) to generate the `json` you're now importing into your project. However, the guide should work just the same if you're using `noir_wasm`. + +:::info + +As you've read in the [explainer](../explainers/explainer-recursion.md), a recursive proof is an intermediate proof, meaning it doesn't necessarily generate the final step that makes it verifiable in a smart contract. However, it is easy to verify within another circuit. + +While "standard" usage of NoirJS packages abstracts final proofs, it currently lacks the necessary interface to abstract away intermediate proofs, which means these proofs need to be created by using the backend directly. + +In short: + +- `noir_js` generates *only* final proofs +- `backend_barretenberg` generates both types of proofs + +::: + +In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume these two: + +- `main`: a circuit of type `assert(x != y)` +- `recursive`: a circuit that verifies `main` + +For a full example of how recursive proofs work, please refer to the [noir-examples](https://github.com/noir/noir-examples) repository. We will *not* be using it as a reference for this guide. + +## Step 1: Setup + +In a common NoirJS app, you need to instantiate a backend with something like `const backend = new Backend(circuit)`. Then you feed it to the `noir_js` interface. + +For recursion, this doesn't happen: `noir_js` is only needed to `execute` a circuit and get its witness and return value. Everything else is not interfaced, so it needs to happen on the `backend` object. + +It is also recommended that you instantiate the backend with as many threads as possible, to allow for maximum concurrency: + +```js +const backend = new Backend(circuit, { threads: 8 }) +``` + +:::tip +You can use the [`os.cpus()`](https://nodejs.org/api/os.html#oscpus) object in `nodejs` or [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) on the browser to make the most out of those glorious CPU cores +::: + +## Step 2: Generating the witness and the proof for `main` + +After instantiating the backend, you should also instantiate `noir_js`. We will use it to execute the circuit and get the witness. + +```js +const noir = new Noir(circuit, backend) +const { witness } = noir.execute(input) +``` + +With this witness, you are now able to generate the intermediate proof for the main circuit: + +```js +const { proof, publicInputs } = await backend.generateIntermediateProof(witness) +``` + +:::warning + +Always keep in mind what is actually happening in your development process, otherwise you'll quickly become confused about which circuit you are actually running and why!
+ +In this case, you can imagine that Alice (running the `main` circuit) is proving something to Bob (running the `recursive` circuit), and Bob is verifying her proof within his proof. + +With this in mind, it becomes clear that our intermediate proof is the one *meant to be verified within another circuit*. So it is Alice's. Actually, the only final proof in this theoretical scenario would be the last one, sent on-chain. + +::: + +## Step 3 - Verification and proof artifacts + +Optionally, you are able to verify the intermediate proof: + +```js +const verified = await backend.verifyIntermediateProof({ proof, publicInputs }) +``` + +This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate the intermediate artifacts: + +```js +const { proofAsFields, vkAsFields, vkHash } = await backend.generateIntermediateProofArtifacts( { publicInputs, proof }, publicInputsCount) +``` + +This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away. + +:::info + +The `proofAsFields` has a constant size `[Field; 93]`. However, currently the backend doesn't remove the public inputs from the proof when converting it. + +This means that if your `main` circuit has two public inputs, then you should also modify the recursive circuit to accept a proof with the public inputs appended. This means that in our example, since `y` is a public input, our `proofAsFields` is of type `[Field; 94]`. + +Verification keys in Barretenberg are always of size 114. + +::: + +:::warning + +One common mistake is to forget *who* makes this call. + +In a situation where Alice is generating the `main` proof, if she generates the proof artifacts and sends them to Bob, who gladly takes them as true, this would mean Alice could prove anything! + +Instead, Bob needs to make sure *he* extracts the proof artifacts, using his own instance of the `main` circuit backend. This way, Alice has to provide a valid proof for the correct `main` circuit. + +::: + +## Step 4 - Recursive proof generation + +With the artifacts, generating a recursive proof is no different from a normal proof. You simply use the `backend` (with the recursive circuit) to generate it: + +```js +const recursiveInputs = { + verification_key: vkAsFields, // array of length 114 + proof: proofAsFields, // array of length 93 + size of public inputs + publicInputs: [mainInput.y], // using the example above, where `y` is the only public input + key_hash: vkHash, + input_aggregation_object: Array(16).fill(0) // this circuit is verifying a non-recursive proof, so there's no input aggregation object: just use zero +} + +const { witness, returnValue } = noir.execute(recursiveInputs) // we're executing the recursive circuit now! +const { proof, publicInputs } = backend.generateFinalProof(witness) +const verified = backend.verifyFinalProof({ proof, publicInputs }) +``` + +You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested in using them this way! In that case, you should keep in mind the `returnValue`, as it will contain the `input_aggregation_object` for the next proof. + +:::tip + +Managing circuits and "who does what" can be confusing. To make sure your naming is consistent, you can keep them in an object.
For example: + +```js +const circuits = { +main: mainJSON, +recursive: recursiveJSON +} +const backends = { +main: new BarretenbergBackend(circuits.main), +recursive: new BarretenbergBackend(circuits.recursive) +} +const noirs = { +main: new Noir(circuits.main, backends.main), +recursive: new Noir(circuits.recursive, backends.recursive) +} +``` + +This allows you to neatly call exactly the method you want without conflicting names: + +```js +// Alice runs this 👇 +const { witness: mainWitness } = await noirs.main.execute(input) +const proof = await backends.main.generateIntermediateProof(mainWitness) + +// Bob runs this 👇 +const verified = await backends.main.verifyIntermediateProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateIntermediateProofArtifacts( + proof, + numPublicInputs, +); +const recursiveProof = await noirs.recursive.generateFinalProof(recursiveInputs) +``` + +::: diff --git a/docs/docs/how_to/03_solidity_verifier.md b/docs/docs/how_to/solidity_verifier.md similarity index 98% rename from docs/docs/how_to/03_solidity_verifier.md rename to docs/docs/how_to/solidity_verifier.md index 1a89fe492f4..8022b0e5f20 100644 --- a/docs/docs/how_to/03_solidity_verifier.md +++ b/docs/docs/how_to/solidity_verifier.md @@ -3,7 +3,7 @@ title: Generate a Solidity Verifier description: Learn how to run the verifier as a smart contract on the blockchain. Compile a Solidity verifier contract for your Noir program and deploy it on any EVM blockchain acting as a verifier smart - contract. Read more to find out! + contract. Read more to find out keywords: [ solidity verifier, @@ -16,6 +16,7 @@ keywords: proving backend, Barretenberg, ] +sidebar_position: 0 --- For certain applications, it may be desirable to run the verifier as a smart contract instead of on diff --git a/docs/docs/index.md b/docs/docs/index.md index 754f9f6e31d..016832f9f5e 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -38,7 +38,7 @@ programming. ### Solidity Developers Noir streamlines the creation of Solidity contracts that interface with SNARK systems. -[`Utilize the nargo codegen-verifier`](@site/docs/reference/nargo_commands.md#nargo-codegen-verifier) command to construct verifier +[`Utilize the nargo codegen-verifier`](./reference/nargo_commands.md#nargo-codegen-verifier) command to construct verifier contracts efficiently. While the current alpha version offers this as a direct feature, future updates aim to modularize this process for even greater ease of use. @@ -82,4 +82,4 @@ Some libraries that are available today include: - [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, allowing results that aren't whole numbers -See the section on [dependencies](@site/docs/explanations/modules_packages_crates/dependencies.md) for more information. +See the section on [dependencies](noir/modules_packages_crates/dependencies.md) for more information. diff --git a/docs/docs/migration_notes.md b/docs/docs/migration_notes.md index a5fd10769f7..d5d0682cf0c 100644 --- a/docs/docs/migration_notes.md +++ b/docs/docs/migration_notes.md @@ -16,7 +16,7 @@ To update, please make sure this field in `Nargo.toml` matches the output of `na ## ≥0.14 -The index of the [for loops](@site/docs/explanations/noir/control_flow.md#loops) is now of type `u64` instead of `Field`. An example refactor would be: +The index of the [for loops](noir/syntax/control_flow.md#loops) is now of type `u64` instead of `Field`. 
An example refactor would be: ```rust for i in 0..10 { diff --git a/docs/docs/explanations/modules_packages_crates/_category_.json b/docs/docs/noir/modules_packages_crates/_category_.json similarity index 100% rename from docs/docs/explanations/modules_packages_crates/_category_.json rename to docs/docs/noir/modules_packages_crates/_category_.json diff --git a/docs/docs/explanations/modules_packages_crates/crates_and_packages.md b/docs/docs/noir/modules_packages_crates/crates_and_packages.md similarity index 100% rename from docs/docs/explanations/modules_packages_crates/crates_and_packages.md rename to docs/docs/noir/modules_packages_crates/crates_and_packages.md diff --git a/docs/docs/explanations/modules_packages_crates/dependencies.md b/docs/docs/noir/modules_packages_crates/dependencies.md similarity index 100% rename from docs/docs/explanations/modules_packages_crates/dependencies.md rename to docs/docs/noir/modules_packages_crates/dependencies.md diff --git a/docs/docs/explanations/modules_packages_crates/modules.md b/docs/docs/noir/modules_packages_crates/modules.md similarity index 100% rename from docs/docs/explanations/modules_packages_crates/modules.md rename to docs/docs/noir/modules_packages_crates/modules.md diff --git a/docs/docs/explanations/modules_packages_crates/workspaces.md b/docs/docs/noir/modules_packages_crates/workspaces.md similarity index 100% rename from docs/docs/explanations/modules_packages_crates/workspaces.md rename to docs/docs/noir/modules_packages_crates/workspaces.md diff --git a/docs/docs/explanations/standard_library/_category_.json b/docs/docs/noir/standard_library/_category_.json similarity index 100% rename from docs/docs/explanations/standard_library/_category_.json rename to docs/docs/noir/standard_library/_category_.json diff --git a/docs/docs/explanations/standard_library/black_box_fns.md b/docs/docs/noir/standard_library/black_box_fns.md similarity index 100% rename from docs/docs/explanations/standard_library/black_box_fns.md rename to docs/docs/noir/standard_library/black_box_fns.md diff --git a/docs/docs/explanations/noir/data_types/_category_.json b/docs/docs/noir/standard_library/cryptographic_primitives/_category_.json similarity index 100% rename from docs/docs/explanations/noir/data_types/_category_.json rename to docs/docs/noir/standard_library/cryptographic_primitives/_category_.json diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/ec_primitives.md b/docs/docs/noir/standard_library/cryptographic_primitives/ec_primitives.md similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/ec_primitives.md rename to docs/docs/noir/standard_library/cryptographic_primitives/ec_primitives.md diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx rename to docs/docs/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/eddsa.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/eddsa.mdx rename to docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx diff --git 
a/docs/docs/explanations/standard_library/cryptographic_primitives/hashes.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/hashes.mdx rename to docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/index.md b/docs/docs/noir/standard_library/cryptographic_primitives/index.md similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/index.md rename to docs/docs/noir/standard_library/cryptographic_primitives/index.md diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/scalar.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/scalar.mdx rename to docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/schnorr.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/schnorr.mdx similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/schnorr.mdx rename to docs/docs/noir/standard_library/cryptographic_primitives/schnorr.mdx diff --git a/docs/docs/explanations/standard_library/logging.md b/docs/docs/noir/standard_library/logging.md similarity index 100% rename from docs/docs/explanations/standard_library/logging.md rename to docs/docs/noir/standard_library/logging.md diff --git a/docs/docs/explanations/standard_library/merkle_trees.md b/docs/docs/noir/standard_library/merkle_trees.md similarity index 93% rename from docs/docs/explanations/standard_library/merkle_trees.md rename to docs/docs/noir/standard_library/merkle_trees.md index 07fa2ccda79..5b45617812a 100644 --- a/docs/docs/explanations/standard_library/merkle_trees.md +++ b/docs/docs/noir/standard_library/merkle_trees.md @@ -17,7 +17,7 @@ keywords: ## compute_merkle_root -Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](@site/docs/explanations/standard_library/cryptographic_primitives/hashes.mdx#pedersen_hash). +Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](./cryptographic_primitives/hashes.mdx#pedersen_hash). ```rust fn compute_merkle_root(leaf : Field, index : Field, hash_path: [Field]) -> Field diff --git a/docs/docs/explanations/standard_library/options.md b/docs/docs/noir/standard_library/options.md similarity index 100% rename from docs/docs/explanations/standard_library/options.md rename to docs/docs/noir/standard_library/options.md diff --git a/docs/docs/explanations/standard_library/recursion.md b/docs/docs/noir/standard_library/recursion.md similarity index 65% rename from docs/docs/explanations/standard_library/recursion.md rename to docs/docs/noir/standard_library/recursion.md index ff4c63acaa7..67962082a8f 100644 --- a/docs/docs/explanations/standard_library/recursion.md +++ b/docs/docs/noir/standard_library/recursion.md @@ -19,11 +19,7 @@ This is a black box function. Read [this section](./black_box_fns) to learn more ::: -## Aggregation Object - -The purpose of the input aggregation object is a little less clear though (and the output aggregation object that is returned from the `std::verify_proof` method). 
Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather an aggregation object is built over the public inputs. In the case of PLONK the recursive aggregation object is two G1 points (expressed as 16 witness values). The final verifier (in our case this is most often the smart contract verifier) has to be aware of this aggregation object to execute a pairing and check the validity of these points (thus completing the recursive verification). - -So for example in this circuit: +## Example usage ```rust use dep::std; @@ -37,17 +33,17 @@ fn main( proof_b : [Field; 94], ) -> pub [Field; 16] { let output_aggregation_object_a = std::verify_proof( - verification_key, - proof, - public_inputs, + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), key_hash, input_aggregation_object ); let output_aggregation_object = std::verify_proof( - verification_key, - proof_b, - public_inputs, + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), key_hash, output_aggregation_object_a ); @@ -60,8 +56,6 @@ fn main( } ``` -In this example we have a circuit, that generates proofs A and B, that is being verified in circuit C. Assuming that the proof being passed in is not already a recursive proof, the `input_aggregation_object` will be all zeros. It will then generate an `output_aggregation_object`. This blob of data then becomes the `input_aggregation_object` of the next recursive aggregation we wish to compute. We can see here as the same public inputs, verification key, and key hash are used that we are verifying two proofs generated from the same circuit in this single circuit. `std::verify_proof` returns a `[Field]` because the size of an aggregation object is proof system dependent--in barretenberg, aggregation objects are two G1 points, while in Halo2, the aggregation object is a list of G1 points that is log the circuit size. So for the final step we convert the slice into an array of size 16 because we are generating proofs using UltraPlonk. 
- ## Parameters ### `verification_key` diff --git a/docs/docs/explanations/standard_library/zeroed.md b/docs/docs/noir/standard_library/zeroed.md similarity index 100% rename from docs/docs/explanations/standard_library/zeroed.md rename to docs/docs/noir/standard_library/zeroed.md diff --git a/docs/docs/noir/syntax/_category_.json b/docs/docs/noir/syntax/_category_.json new file mode 100644 index 00000000000..666b691ae91 --- /dev/null +++ b/docs/docs/noir/syntax/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Syntax", + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/docs/explanations/noir/assert.md b/docs/docs/noir/syntax/assert.md similarity index 100% rename from docs/docs/explanations/noir/assert.md rename to docs/docs/noir/syntax/assert.md diff --git a/docs/docs/explanations/noir/comments.md b/docs/docs/noir/syntax/comments.md similarity index 100% rename from docs/docs/explanations/noir/comments.md rename to docs/docs/noir/syntax/comments.md diff --git a/docs/docs/explanations/noir/control_flow.md b/docs/docs/noir/syntax/control_flow.md similarity index 100% rename from docs/docs/explanations/noir/control_flow.md rename to docs/docs/noir/syntax/control_flow.md diff --git a/docs/docs/explanations/noir/data_bus.md b/docs/docs/noir/syntax/data_bus.md similarity index 100% rename from docs/docs/explanations/noir/data_bus.md rename to docs/docs/noir/syntax/data_bus.md diff --git a/docs/docs/explanations/standard_library/cryptographic_primitives/_category_.json b/docs/docs/noir/syntax/data_types/_category_.json similarity index 100% rename from docs/docs/explanations/standard_library/cryptographic_primitives/_category_.json rename to docs/docs/noir/syntax/data_types/_category_.json diff --git a/docs/docs/explanations/noir/data_types/arrays.md b/docs/docs/noir/syntax/data_types/arrays.md similarity index 100% rename from docs/docs/explanations/noir/data_types/arrays.md rename to docs/docs/noir/syntax/data_types/arrays.md diff --git a/docs/docs/explanations/noir/data_types/booleans.md b/docs/docs/noir/syntax/data_types/booleans.md similarity index 100% rename from docs/docs/explanations/noir/data_types/booleans.md rename to docs/docs/noir/syntax/data_types/booleans.md diff --git a/docs/docs/explanations/noir/data_types/fields.md b/docs/docs/noir/syntax/data_types/fields.md similarity index 100% rename from docs/docs/explanations/noir/data_types/fields.md rename to docs/docs/noir/syntax/data_types/fields.md diff --git a/docs/docs/explanations/noir/data_types/function_types.md b/docs/docs/noir/syntax/data_types/function_types.md similarity index 88% rename from docs/docs/explanations/noir/data_types/function_types.md rename to docs/docs/noir/syntax/data_types/function_types.md index f6121af17e2..61e4076adaf 100644 --- a/docs/docs/explanations/noir/data_types/function_types.md +++ b/docs/docs/noir/syntax/data_types/function_types.md @@ -23,4 +23,4 @@ fn main() { ``` A function type also has an optional capture environment - this is necessary to support closures. -See [Lambdas](../lambdas.md) for more details. +See [Lambdas](@site/docs/noir/syntax/lambdas.md) for more details. 
diff --git a/docs/docs/explanations/noir/data_types/index.md b/docs/docs/noir/syntax/data_types/index.md similarity index 97% rename from docs/docs/explanations/noir/data_types/index.md rename to docs/docs/noir/syntax/data_types/index.md index 56ed55c444b..52e568e9b7e 100644 --- a/docs/docs/explanations/noir/data_types/index.md +++ b/docs/docs/noir/syntax/data_types/index.md @@ -79,7 +79,7 @@ fn main() { } ``` -Type aliases can also be used with [generics](@site/docs/explanations/noir/generics.md): +Type aliases can also be used with [generics](@site/docs/noir/syntax/generics.md): ```rust type Id = Size; diff --git a/docs/docs/explanations/noir/data_types/integers.md b/docs/docs/noir/syntax/data_types/integers.md similarity index 100% rename from docs/docs/explanations/noir/data_types/integers.md rename to docs/docs/noir/syntax/data_types/integers.md diff --git a/docs/docs/explanations/noir/data_types/references.md b/docs/docs/noir/syntax/data_types/references.md similarity index 100% rename from docs/docs/explanations/noir/data_types/references.md rename to docs/docs/noir/syntax/data_types/references.md diff --git a/docs/docs/explanations/noir/data_types/slices.mdx b/docs/docs/noir/syntax/data_types/slices.mdx similarity index 100% rename from docs/docs/explanations/noir/data_types/slices.mdx rename to docs/docs/noir/syntax/data_types/slices.mdx diff --git a/docs/docs/explanations/noir/data_types/strings.md b/docs/docs/noir/syntax/data_types/strings.md similarity index 100% rename from docs/docs/explanations/noir/data_types/strings.md rename to docs/docs/noir/syntax/data_types/strings.md diff --git a/docs/docs/explanations/noir/data_types/structs.md b/docs/docs/noir/syntax/data_types/structs.md similarity index 100% rename from docs/docs/explanations/noir/data_types/structs.md rename to docs/docs/noir/syntax/data_types/structs.md diff --git a/docs/docs/explanations/noir/data_types/tuples.md b/docs/docs/noir/syntax/data_types/tuples.md similarity index 100% rename from docs/docs/explanations/noir/data_types/tuples.md rename to docs/docs/noir/syntax/data_types/tuples.md diff --git a/docs/docs/explanations/noir/data_types/vectors.mdx b/docs/docs/noir/syntax/data_types/vectors.mdx similarity index 100% rename from docs/docs/explanations/noir/data_types/vectors.mdx rename to docs/docs/noir/syntax/data_types/vectors.mdx diff --git a/docs/docs/explanations/noir/distinct.md b/docs/docs/noir/syntax/distinct.md similarity index 100% rename from docs/docs/explanations/noir/distinct.md rename to docs/docs/noir/syntax/distinct.md diff --git a/docs/docs/explanations/noir/functions.md b/docs/docs/noir/syntax/functions.md similarity index 92% rename from docs/docs/explanations/noir/functions.md rename to docs/docs/noir/syntax/functions.md index 94f929038ee..48aba9cd058 100644 --- a/docs/docs/explanations/noir/functions.md +++ b/docs/docs/noir/syntax/functions.md @@ -15,7 +15,7 @@ To declare a function the `fn` keyword is used. fn foo() {} ``` -By default, functions are visible only within the package they are defined. To make them visible outside of that package (for example, as part of a [library](@site/docs/explanations/modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: +By default, functions are visible only within the package they are defined. 
To make them visible outside of that package (for example, as part of a [library](../modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: ```rust pub fn foo() {} @@ -62,7 +62,7 @@ fn main(x : [Field]) // can't compile, has variable size fn main(....// i think you got it by now ``` -Keep in mind [tests](@site/docs/getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: +Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: ```rust fn main(x : [Field]) { @@ -189,8 +189,8 @@ Supported attributes include: - **builtin**: the function is implemented by the compiler, for efficiency purposes. - **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function` - **field**: Used to enable conditional compilation of code depending on the field size. See below for more details -- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](@site/docs/reference/NoirJS/noir_js/index.md) for more details. -- **test**: mark the function as unit tests. See [Tests](@site/docs/getting_started/tooling/testing.md) for more details +- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details. +- **test**: mark the function as unit tests. See [Tests](../../getting_started/tooling/testing.md) for more details ### Field Attribute diff --git a/docs/docs/explanations/noir/generics.md b/docs/docs/noir/syntax/generics.md similarity index 100% rename from docs/docs/explanations/noir/generics.md rename to docs/docs/noir/syntax/generics.md diff --git a/docs/docs/explanations/noir/lambdas.md b/docs/docs/noir/syntax/lambdas.md similarity index 100% rename from docs/docs/explanations/noir/lambdas.md rename to docs/docs/noir/syntax/lambdas.md diff --git a/docs/docs/explanations/noir/mutability.md b/docs/docs/noir/syntax/mutability.md similarity index 100% rename from docs/docs/explanations/noir/mutability.md rename to docs/docs/noir/syntax/mutability.md diff --git a/docs/docs/explanations/noir/ops.md b/docs/docs/noir/syntax/ops.md similarity index 100% rename from docs/docs/explanations/noir/ops.md rename to docs/docs/noir/syntax/ops.md diff --git a/docs/docs/explanations/noir/shadowing.md b/docs/docs/noir/syntax/shadowing.md similarity index 100% rename from docs/docs/explanations/noir/shadowing.md rename to docs/docs/noir/syntax/shadowing.md diff --git a/docs/docs/explanations/noir/unconstrained.md b/docs/docs/noir/syntax/unconstrained.md similarity index 100% rename from docs/docs/explanations/noir/unconstrained.md rename to docs/docs/noir/syntax/unconstrained.md diff --git a/docs/docs/reference/nargo_commands.md b/docs/docs/reference/nargo_commands.md index 239e88d9691..ff3dee8973f 100644 --- a/docs/docs/reference/nargo_commands.md +++ b/docs/docs/reference/nargo_commands.md @@ -213,7 +213,7 @@ you run `nargo test`. To print `println` statements in tests, use the `--show-ou Takes an optional `--exact` flag which allows you to select tests based on an exact name. 
-See an example on the [testing page](@site/docs/getting_started/tooling/testing.md). +See an example on the [testing page](../getting_started/tooling/testing.md). ### Options diff --git a/docs/docs/tutorials/noirjs_app.md b/docs/docs/tutorials/noirjs_app.md index e0f674fa09c..302ee4aeade 100644 --- a/docs/docs/tutorials/noirjs_app.md +++ b/docs/docs/tutorials/noirjs_app.md @@ -21,7 +21,7 @@ In this guide, we will be pinned to 0.17.0. Make sure you have Node installed on your machine by opening a terminal and executing `node --version`. If you don't see a version, you should install [node](https://github.com/nvm-sh/nvm). You can also use `yarn` if you prefer that package manager over npm (which comes with node). -First of all, follow the the [Nargo guide](@site/docs/getting_started/installation/index.md) to install nargo version 0.17.0 and create a new project with `nargo new circuit`. Once there, `cd` into the `circuit` folder. You should then be able to compile your circuit into `json` format and see it inside the `target` folder: +First of all, follow the the [Nargo guide](../getting_started/installation/index.md) to install nargo version 0.17.0 and create a new project with `nargo new circuit`. Once there, `cd` into the `circuit` folder. You should then be able to compile your circuit into `json` format and see it inside the `target` folder: ```bash nargo compile diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts index 98ef1fd680c..7516d35c6d9 100644 --- a/docs/docusaurus.config.ts +++ b/docs/docusaurus.config.ts @@ -40,7 +40,7 @@ export default { }, blog: false, theme: { - customCss: './src/css/custom.css', + customCss: ['./src/css/custom.css', './src/css/sidebar.css'], }, }, ], diff --git a/docs/package.json b/docs/package.json index 86f15b0a311..1fa4ab79b85 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,9 +3,9 @@ "version": "0.0.0", "private": true, "scripts": { - "start": "docusaurus start", + "start": "yarn version::stables && docusaurus start", "build": "yarn version::stables && docusaurus build", - "version::stables": "node --loader ts-node/esm ./scripts/setStable.ts", + "version::stables": "ts-node ./scripts/setStable.ts", "serve": "serve build" }, "dependencies": { diff --git a/docs/scripts/setStable.ts b/docs/scripts/setStable.ts index e23d990763a..0f86c4afd59 100644 --- a/docs/scripts/setStable.ts +++ b/docs/scripts/setStable.ts @@ -1,13 +1,13 @@ +/* eslint-disable @typescript-eslint/no-var-requires */ const fs = require('fs'); const path = require('path'); const axios = require('axios'); +const GITHUB_PAGES = 3; const IGNORE_VERSIONS = ['0.16.0']; -const NUMBER_OF_VERSIONS_TO_SHOW = 4; +const NUMBER_OF_VERSIONS_TO_SHOW = 2; async function main() { - const versionsFile = path.resolve('../versions.json'); - const axiosOpts = { params: { per_page: 100 }, headers: {}, @@ -15,24 +15,41 @@ async function main() { if (process.env.GITHUB_TOKEN) axiosOpts.headers = { Authorization: `token ${process.env.GITHUB_TOKEN}` }; - const { data } = await axios.get('https://api.github.com/repos/noir-lang/noir/releases', axiosOpts); - - const all = data.map((release) => release.tag_name); - console.log('All versions: ', all); - const aztecs = data.filter((release) => release.tag_name.includes('aztec')).map((release) => release.tag_name); - console.log('Removing aztecs: ', aztecs); - const prereleases = data.filter((release) => !release.prerelease).map((release) => release.tag_name); - console.log('Removing prereleases: ', prereleases); - - const stables = data - 
.filter((release) => !release.prerelease && !release.tag_name.includes('aztec')) - .filter((release) => !IGNORE_VERSIONS.includes(release.tag_name.replace('v', ''))) - .map((release) => release.tag_name) - .slice(0, NUMBER_OF_VERSIONS_TO_SHOW); - - console.log('Stables: ', stables); - - fs.writeFileSync(versionsFile, JSON.stringify(stables, null, 2)); + let stables = []; + console.log('Retrieved versions:'); + + for (let i = 0; i < GITHUB_PAGES; i++) { + const { data } = await axios.get(`https://api.github.com/repos/noir-lang/noir/releases?page=${i + 1}`, axiosOpts); + + console.log(data.map((release) => release.tag_name)); + stables.push( + ...data + .filter( + (release) => + !release.prerelease && !release.tag_name.includes('aztec') && !release.tag_name.includes('aztec'), + ) + .filter((release) => !IGNORE_VERSIONS.includes(release.tag_name.replace('v', ''))) + .map((release) => release.tag_name), + ); + } + + stables = stables.slice(0, NUMBER_OF_VERSIONS_TO_SHOW); + + console.log('Filtered down to stables: ', stables); + + const onlyLatestPatches = []; + const minorsSet = new Set(stables.map((el) => el.split('.')[1])); + for (const minor of minorsSet) { + const minorVersions = stables.filter((el) => el.split('.')[1] === minor); + const max = minorVersions.reduce((prev, current) => { + return prev > current ? prev : current; + }); + onlyLatestPatches.push(max); + } + + console.log('Only latest patches: ', onlyLatestPatches); + + fs.writeFileSync(path.resolve(__dirname, '../versions.json'), JSON.stringify(onlyLatestPatches, null, 2)); } main(); diff --git a/docs/sidebars.js b/docs/sidebars.js index 016ead14a8f..f1e79ba9ebc 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -21,10 +21,15 @@ export default { items: [ { type: 'autogenerated', - dirName: 'explanations', + dirName: 'noir', }, ], }, + { + type: 'html', + value: '
', + defaultStyle: true, + }, { type: 'category', label: 'How To Guides', @@ -35,6 +40,16 @@ export default { }, ], }, + { + type: 'category', + label: 'Explainers', + items: [ + { + type: 'autogenerated', + dirName: 'explainers', + }, + ], + }, { type: 'category', label: 'Tutorials', @@ -50,6 +65,11 @@ export default { label: 'Reference', items: [{ type: 'autogenerated', dirName: 'reference' }], }, + { + type: 'html', + value: '
', + defaultStyle: true, + }, { type: 'doc', id: 'migration_notes', diff --git a/docs/src/css/sidebar.css b/docs/src/css/sidebar.css new file mode 100644 index 00000000000..3c03c374058 --- /dev/null +++ b/docs/src/css/sidebar.css @@ -0,0 +1,4 @@ +.divider { + border-top: 2px solid #eee; + margin: 0.5em 0; +} diff --git a/docs/tsconfig.json b/docs/tsconfig.json index 01b56ec5988..241fcf4b5e3 100644 --- a/docs/tsconfig.json +++ b/docs/tsconfig.json @@ -1,6 +1,7 @@ { "extends": "@docusaurus/tsconfig", "compilerOptions": { - "baseUrl": "." + "baseUrl": ".", + "downlevelIteration": true }, } diff --git a/docs/versioned_docs/version-v../explainers/explainer-recursion.md b/docs/versioned_docs/version-v../explainers/explainer-recursion.md new file mode 100644 index 00000000000..cc431a878dc --- /dev/null +++ b/docs/versioned_docs/version-v../explainers/explainer-recursion.md @@ -0,0 +1,175 @@ +--- +title: Recursive proofs +description: Explore the concept of recursive proofs in Zero-Knowledge programming. Understand how recursion works in Noir, a language for writing smart contracts on the EVM blockchain. Learn through practical examples like Alice and Bob's guessing game, Charlie's recursive merkle tree, and Daniel's reusable components. Discover how to use recursive proofs to optimize computational resources and improve efficiency. + +keywords: + [ + "Recursive Proofs", + "Zero-Knowledge Programming", + "Noir", + "EVM Blockchain", + "Smart Contracts", + "Recursion in Noir", + "Alice and Bob Guessing Game", + "Recursive Merkle Tree", + "Reusable Components", + "Optimizing Computational Resources", + "Improving Efficiency", + "Verification Key", + "Aggregation Objects", + "Recursive zkSNARK schemes", + "PLONK", + "Proving and Verification Keys" + ] +sidebar_position: 1 +--- + +In programming, we tend to think of recursion as something calling itself. A classic example would be the calculation of the factorial of a number: + +```js +function factorial(n) { + if (n === 0 || n === 1) { + return 1; + } else { + return n * factorial(n - 1); + } +} +``` + +In this case, while `n` is not `1`, this function will keep calling itself until it hits the base case, bubbling up the result on the call stack: + +```md + Is `n` 1? <--------- + /\ / + / \ n = n -1 + / \ / + Yes No -------- +``` + +In Zero-Knowledge, recursion has some similarities. + +It is not a Noir function calling itself, but a proof being used as an input to another circuit. In short, you verify one proof *inside* another proof, returning the proof that both proofs are valid. + +This means that, given enough computational resources, you can prove the correctness of any arbitrary number of proofs in a single proof. This could be useful to design state channels (for which a common example would be [Bitcoin's Lightning Network](https://en.wikipedia.org/wiki/Lightning_Network)), to save on gas costs by settling one proof on-chain, or simply to make business logic less dependent on a consensus mechanism. + +## Examples + +Let us look at some of these examples + +### Alice and Bob - Guessing game + +Alice and Bob are friends, and they like guessing games. They want to play a guessing game online, but for that, they need a trusted third-party that knows both of their secrets and finishes the game once someone wins. + +So, they use zero-knowledge proofs. Alice tries to guess Bob's number, and Bob will generate a ZK proof stating whether she succeeded or failed. + +This ZK proof can go on a smart contract, revealing the winner and even giving prizes. 
However, this means every turn needs to be verified on-chain. This incurs some cost and waiting time that may simply make the game too expensive or time-consuming to be worth it. + +So, Alice started thinking: "what if Bob generates his proof, and instead of sending it on-chain, I verify it *within* my own proof before playing my own turn?". She can then generate a proof that she verified his proof, and so on. + +```md + Did you fail? <-------------------------- + / \ / + / \ n = n -1 + / \ / + Yes No / + | | / + | | / + | You win / + | / + | / +Generate proof of that / + + / + my own guess ---------------- +``` + +### Charlie - Recursive merkle tree + +Charlie is a concerned citizen, and wants to be sure his vote in an election is accounted for. He votes with a ZK proof, but he has no way of knowing that his ZK proof was included in the total vote count! + +So, the tallier puts all the votes in a merkle tree, and everyone can also prove the verification of two proofs within one proof, as such: + +```md + abcd + __________|______________ + | | + ab cd + _____|_____ ______|______ + | | | | + alice bob charlie daniel +``` + +Doing this recursively allows us to arrive on a final proof `abcd` which if true, verifies the correctness of all the votes. + +### Daniel - Reusable components + +Daniel has a big circuit and a big headache. A part of his circuit is a setup phase that finishes with some assertions that need to be made. But that section alone takes most of the proving time, and is largely independent of the rest of the circuit. + +He could find it more efficient to generate a proof for that setup phase separately, and verifying it in his actual business logic section of the circuit. This will allow for parallelization of both proofs, which results in a considerable speedup. + +## What params do I need + +As you can see in the [recursion reference](noir/standard_library/recursion.md), a simple recursive proof requires: + +- The proof to verify +- The Verification Key of the circuit that generated the proof +- A hash of this verification key, as it's needed for some backends +- The public inputs for the proof +- The input aggregation object + +It also returns the `output aggregation object`. These aggregation objects can be confusing at times, so let's dive in a little bit. + +### Aggregation objects + +Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather an aggregation object is built over the public inputs. + +In the case of PLONK the recursive aggregation object is two G1 points (expressed as 16 witness values). The final verifier (in our case this is most often the smart contract verifier) has to be aware of this aggregation object to execute a pairing and check the validity of these points. + +So, taking the example of Alice and Bob and their guessing game: + +- Alice makes her guess. Her proof is *not* recursive: it doesn't verify any proof within it! It's just a standard `assert(x != y)` circuit +- Bob verifies Alice's proof and makes his own guess. In this circuit, he is verifying a proof, so it needs to output an `aggregation object`: he is generating a recursive proof! +- Alice verifies Bob's *recursive proof*, and uses Bob's `output aggregation object` as the `input aggregation object` in her proof... Which in turn, generates another `output aggregation object`. + +One should notice that when Bob generates his first proof, he has no input aggregation object. 
Since he is not verifying a recursive proof, there is nothing to pass along, so he may simply use zeros instead.
+
+We can imagine the `aggregation object` as the baton in a [relay race](https://en.wikipedia.org/wiki/Relay_race). The first runner doesn't have to receive the baton from anyone else, as he/she already starts with it. But when his/her turn is over, the next runner needs to receive it, run a bit more, and pass it along. Even though every runner could theoretically verify the baton mid-run (why not? 🏃🔍), only at the end of the race does the referee verify that the whole race is valid.
+
+## Some architecture
+
+As with everything in computer science, there's no one-size-fits-all. But there are some patterns that can help with understanding and implementing recursive proofs. To give three examples:
+
+### Adding some logic to a proof verification
+
+This would be an approach for something like our guessing game, where proofs are sent back and forth and are verified by each opponent. This circuit would be divided into two sections:
+
+- A `recursive verification` section, which would be just the call to `std::verify_proof`, and that would be skipped on the first move (since there's no proof to verify)
+- A `guessing` section, which is basically the logic part where the actual guessing happens
+
+In such a situation, and assuming Alice is first, she would skip the first part and try to guess Bob's number. Bob would then verify her proof in the first section of his run, and try to guess Alice's number in the second part, and so on.
+
+### Aggregating proofs
+
+In some one-way interaction situations, recursion would allow for the aggregation of simple proofs that don't need to be immediately verified on-chain or elsewhere.
+
+To give a practical example, a barman wouldn't need to verify a "proof-of-age" on-chain every time he serves alcohol to a customer. Instead, the architecture would comprise two circuits:
+
+- A `main`, non-recursive circuit with some logic
+- A `recursive` circuit meant to verify two proofs in one proof
+
+The customer's proofs would be intermediate, and made on their phones, and the barman could just verify them locally. He would then aggregate them into a final proof sent on-chain (or elsewhere) at the end of the day.
+
+### Recursively verifying different circuits
+
+Nothing prevents you from verifying different circuits in a recursive proof, for example:
+
+- A `circuit1` circuit
+- A `circuit2` circuit
+- A `recursive` circuit
+
+In this example, a regulator could verify that taxes were paid for a specific purchase by aggregating both a `payer` circuit (proving that a purchase was made and taxes were paid), and a `receipt` circuit (proving that the payment was received).
+
+## How fast is it
+
+At the time of writing, verifying recursive proofs is surprisingly fast. This is because most of the time is spent on generating the verification key that will be used to generate the next proof. So you are able to cache the verification key and reuse it later.
+
+Currently, Noir JS packages don't expose the functionality of loading proving and verification keys, but that feature exists in the underlying `bb.js` package.
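+
+As a final illustration, here is a minimal sketch of what a recursive circuit consuming these parameters can look like, closely following the [recursion reference](noir/standard_library/recursion.md). It is only a sketch under the assumptions used throughout this explainer (a Barretenberg/UltraPlonk-style backend where the verification key is 114 fields, a proof with one public input is 94 fields, and the aggregation object is 16 fields):
+
+```rust
+use dep::std;
+
+fn main(
+    verification_key: [Field; 114],
+    proof: [Field; 94],
+    public_inputs: [Field; 1],
+    key_hash: Field,
+    input_aggregation_object: [Field; 16],
+) -> pub [Field; 16] {
+    // Verify the inner proof and build the aggregation object for the next step
+    let output_aggregation_object = std::verify_proof(
+        verification_key.as_slice(),
+        proof.as_slice(),
+        public_inputs.as_slice(),
+        key_hash,
+        input_aggregation_object
+    );
+
+    // `std::verify_proof` returns a slice, so copy it into a fixed-size array to return it
+    let mut output = [0; 16];
+    for i in 0..16 {
+        output[i] = output_aggregation_object[i];
+    }
+    output
+}
+```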
diff --git a/docs/versioned_docs/version-v../explanations/noir/traits.md b/docs/versioned_docs/version-v../explanations/noir/traits.md
new file mode 100644
index 00000000000..7ba07e74f40
--- /dev/null
+++ b/docs/versioned_docs/version-v../explanations/noir/traits.md
@@ -0,0 +1,348 @@
+---
+title: Traits
+description:
+  Traits in Noir can be used to abstract out a common interface for functions across
+  several data types.
+keywords: [noir programming language, traits, interfaces, generic, protocol]
+---
+
+## Overview
+
+Traits in Noir are a useful abstraction similar to interfaces or protocols in other languages. Each trait defines
+the interface of several methods contained within the trait. Types can then implement this trait by providing
+implementations for these methods. For example in the program:
+
+```rust
+struct Rectangle {
+    width: Field,
+    height: Field,
+}
+
+impl Rectangle {
+    fn area(self) -> Field {
+        self.width * self.height
+    }
+}
+
+fn log_area(r: Rectangle) {
+    println(r.area());
+}
+```
+
+We have a function `log_area` to log the area of a `Rectangle`. Now how should we change the program if we want this
+function to work on `Triangle`s as well?
+
+```rust
+struct Triangle {
+    width: Field,
+    height: Field,
+}
+
+impl Triangle {
+    fn area(self) -> Field {
+        self.width * self.height / 2
+    }
+}
+```
+
+Making `log_area` generic over all types `T` would be invalid since not all types have an `area` method. Instead, we can
+introduce a new `Area` trait and make `log_area` generic over all types `T` that implement `Area`:
+
+```rust
+trait Area {
+    fn area(self) -> Field;
+}
+
+fn log_area<T>(shape: T) where T: Area {
+    println(shape.area());
+}
+```
+
+We also need to explicitly implement `Area` for `Rectangle` and `Triangle`. We can do that by changing their existing
+impls slightly. Note that the parameter types and return type of each of our `area` methods must match those defined
+by the `Area` trait.
+
+```rust
+impl Area for Rectangle {
+    fn area(self) -> Field {
+        self.width * self.height
+    }
+}
+
+impl Area for Triangle {
+    fn area(self) -> Field {
+        self.width * self.height / 2
+    }
+}
+```
+
+Now we have a working program that is generic over any type of shape that is used! Others can even use this program
+as a library with their own types - such as `Circle` - as long as they also implement `Area` for these types.
+
+## Where Clauses
+
+As seen in `log_area` above, when we want to create a function or method that is generic over any type that implements
+a trait, we can add a where clause to the generic function.
+
+```rust
+fn log_area<T>(shape: T) where T: Area {
+    println(shape.area());
+}
+```
+
+It is also possible to apply multiple trait constraints on the same variable at once by combining traits with the `+` operator.
+Similarly, we can have multiple trait constraints by separating each with a comma:
+
+```rust
+fn foo<T, U>(elements: [T], thing: U) where
+    T: Default + Add + Eq,
+    U: Bar,
+{
+    let mut sum = T::default();
+
+    for element in elements {
+        sum += element;
+    }
+
+    if sum == T::default() {
+        thing.bar();
+    }
+}
+```
+
+## Generic Implementations
+
+You can add generics to a trait implementation by adding the generic list after the `impl` keyword:
+
+```rust
+trait Second {
+    fn second(self) -> Field;
+}
+
+impl<T> Second for (T, Field) {
+    fn second(self) -> Field {
+        self.1
+    }
+}
+```
+
+You can also implement a trait for every type this way:
+
+```rust
+trait Debug {
+    fn debug(self);
+}
+
+impl<T> Debug for T {
+    fn debug(self) {
+        println(self);
+    }
+}
+
+fn main() {
+    1.debug();
+}
+```
+
+### Generic Trait Implementations With Where Clauses
+
+Where clauses can also be placed on trait implementations themselves to restrict generics in a similar way.
+For example, while `impl<T> Foo for T` implements the trait `Foo` for every type, `impl<T> Foo for T where T: Bar`
+will implement `Foo` only for types that also implement `Bar`. This is often used for implementing generic types.
+For example, here is the implementation for array equality:
+
+```rust
+impl<T, N> Eq for [T; N] where T: Eq {
+    // Test if two arrays have the same elements.
+    // Because both arrays must have length N, we know their lengths already match.
+    fn eq(self, other: Self) -> bool {
+        let mut result = true;
+
+        for i in 0 .. self.len() {
+            // The T: Eq constraint is needed to call == on the array elements here
+            result &= self[i] == other[i];
+        }
+
+        result
+    }
+}
+```
+
+## Trait Methods With No `self`
+
+A trait can contain any number of methods, each of which has access to the `Self` type which represents each type
+that eventually implements the trait. Similarly, the `self` variable is available as well but is not required to be used.
+For example, we can define a trait to create a default value for a type. This trait will need to return the `Self` type
+but doesn't need to take any parameters:
+
+```rust
+trait Default {
+    fn default() -> Self;
+}
+```
+
+Implementing this trait can be done similarly to any other trait:
+
+```rust
+impl Default for Field {
+    fn default() -> Field {
+        0
+    }
+}
+
+struct MyType {}
+
+impl Default for MyType {
+    fn default() -> MyType {
+        MyType {}
+    }
+}
+```
+
+However, since there is no `self` parameter, we cannot call it via the method call syntax `object.method()`.
+Instead, we'll need to refer to the function directly. This can be done either by referring to the
+specific impl `MyType::default()` or referring to the trait itself `Default::default()`. In the latter
+case, type inference determines the impl that is selected.
+
+```rust
+let my_struct = MyType::default();
+
+let x: Field = Default::default();
+let result = x + Default::default();
+```
+
+:::warning
+
+```rust
+let _ = Default::default();
+```
+
+If type inference cannot select which impl to use because of an ambiguous `Self` type, an impl will be
+arbitrarily selected. This occurs most often when the result of a trait function call with no parameters
+is unused. To avoid this, when calling a trait function with no `self` or `Self` parameters or return type,
+always refer to it via the implementation type's namespace - e.g. `MyType::default()`.
+This is set to change to an error in future Noir versions.
+ +::: + +## Default Method Implementations + +A trait can also have default implementations of its methods by giving a body to the desired functions. +Note that this body must be valid for all types that may implement the trait. As a result, the only +valid operations on `self` will be operations valid for any type or other operations on the trait itself. + +```rust +trait Numeric { + fn add(self, other: Self) -> Self; + + // Default implementation of double is (self + self) + fn double(self) -> Self { + self.add(self) + } +} +``` + +When implementing a trait with default functions, a type may choose to implement only the required functions: + +```rust +impl Numeric for Field { + fn add(self, other: Field) -> Field { + self + other + } +} +``` + +Or it may implement the optional methods as well: + +```rust +impl Numeric for u32 { + fn add(self, other: u32) -> u32 { + self + other + } + + fn double(self) -> u32 { + self * 2 + } +} +``` + +## Impl Specialization + +When implementing traits for a generic type it is possible to implement the trait for only a certain combination +of generics. This can be either as an optimization or because those specific generics are required to implement the trait. + +```rust +trait Sub { + fn sub(self, other: Self) -> Self; +} + +struct NonZero { + value: T, +} + +impl Sub for NonZero { + fn sub(self, other: Self) -> Self { + let value = self.value - other.value; + assert(value != 0); + NonZero { value } + } +} +``` + +## Overlapping Implementations + +Overlapping implementations are disallowed by Noir to ensure Noir's decision on which impl to select is never ambiguous. +This means if a trait `Foo` is already implemented +by a type `Bar` for all `T`, then we cannot also have a separate impl for `Bar` (or any other +type argument). Similarly, if there is an impl for all `T` such as `impl Debug for T`, we cannot create +any more impls to `Debug` for other types since it would be ambiguous which impl to choose for any given +method call. + +```rust +trait Trait {} + +// Previous impl defined here +impl Trait for (A, B) {} + +// error: Impl for type `(Field, Field)` overlaps with existing impl +impl Trait for (Field, Field) {} +``` + +## Trait Coherence + +Another restriction on trait implementations is coherence. This restriction ensures other crates cannot create +impls that may overlap with other impls, even if several unrelated crates are used as dependencies in the same +program. + +The coherence restriction is: to implement a trait, either the trait itself or the object type must be declared +in the crate the impl is in. + +In practice this often comes up when using types provided by libraries. If a library provides a type `Foo` that does +not implement a trait in the standard library such as `Default`, you may not `impl Default for Foo` in your own crate. +While restrictive, this prevents later issues or silent changes in the program if the `Foo` library later added its +own impl for `Default`. If you are a user of the `Foo` library in this scenario and need a trait not implemented by the +library your choices are to either submit a patch to the library or use the newtype pattern. + +### The Newtype Pattern + +The newtype pattern gets around the coherence restriction by creating a new wrapper type around the library type +that we cannot create `impl`s for. Since the new wrapper type is defined in our current crate, we can create +impls for any trait we need on it. 
+ +```rust +struct Wrapper { + foo: dep::some_library::Foo, +} + +impl Default for Wrapper { + fn default() -> Wrapper { + Wrapper { + foo: dep::some_library::Foo::new(), + } + } +} +``` + +Since we have an impl for our own type, the behavior of this code will not change even if `some_library` is updated +to provide its own `impl Default for Foo`. The downside of this pattern is that it requires extra wrapping and +unwrapping of values when converting to and from the `Wrapper` and `Foo` types. diff --git a/docs/versioned_docs/version-v../explanations/standard_library/traits.md b/docs/versioned_docs/version-v../explanations/standard_library/traits.md new file mode 100644 index 00000000000..63b4f3d6f0b --- /dev/null +++ b/docs/versioned_docs/version-v../explanations/standard_library/traits.md @@ -0,0 +1,140 @@ +--- +title: Traits +description: Noir's stdlib provides a few commonly used traits. +keywords: [traits, trait, interface, protocol, default, add, eq] +--- + +## `std::default` + +### `std::default::Default` + +```rust +trait Default { + fn default() -> Self; +} +``` + +Constructs a default value of a type. + +Implementations: +```rust +impl Default for Field { .. } + +impl Default for i8 { .. } +impl Default for i16 { .. } +impl Default for i32 { .. } +impl Default for i64 { .. } + +impl Default for u8 { .. } +impl Default for u16 { .. } +impl Default for u32 { .. } +impl Default for u64 { .. } + +impl Default for () { .. } +impl Default for bool { .. } + +impl Default for [T; N] + where T: Default { .. } + +impl Default for (A, B) + where A: Default, B: Default { .. } + +impl Default for (A, B, C) + where A: Default, B: Default, C: Default { .. } + +impl Default for (A, B, C, D) + where A: Default, B: Default, C: Default, D: Default { .. } + +impl Default for (A, B, C, D, E) + where A: Default, B: Default, C: Default, D: Default, E: Default { .. } +``` + +For primitive integer types, the return value of `default` is `0`. Container +types such as arrays are filled with default values of their element type. + +## `std::ops` + +### `std::ops::Eq` + +```rust +trait Eq { + fn eq(self, other: Self) -> bool; +} +``` +Returns `true` if `self` is equal to `other`. + +Implementations: +```rust +impl Eq for Field { .. } + +impl Eq for i8 { .. } +impl Eq for i16 { .. } +impl Eq for i32 { .. } +impl Eq for i64 { .. } + +impl Eq for u8 { .. } +impl Eq for u16 { .. } +impl Eq for u32 { .. } +impl Eq for u64 { .. } + +impl Eq for () { .. } +impl Eq for bool { .. } + +impl Eq for [T; N] + where T: Eq { .. } + +impl Eq for (A, B) + where A: Eq, B: Eq { .. } + +impl Eq for (A, B, C) + where A: Eq, B: Eq, C: Eq { .. } + +impl Eq for (A, B, C, D) + where A: Eq, B: Eq, C: Eq, D: Eq { .. } + +impl Eq for (A, B, C, D, E) + where A: Eq, B: Eq, C: Eq, D: Eq, E: Eq { .. } +``` + +### `std::ops::Add`, `std::ops::Sub`, `std::ops::Mul`, and `std::ops::Div` + +These traits abstract over addition, subtraction, multiplication, and division respectively. +Although Noir does not currently have operator overloading, in the future implementing these +traits for a given type will also allow that type to be used with the corresponding operator +for that trait (`+` for Add, etc) in addition to the normal method names. 
+ +```rust +trait Add { + fn add(self, other: Self) -> Self; +} + +trait Sub { + fn sub(self, other: Self) -> Self; +} + +trait Mul { + fn mul(self, other: Self) -> Self; +} + +trait Div { + fn div(self, other: Self) -> Self; +} +``` + +The implementations block below is given for the `Add` trait, but the same types that implement +`Add` also implement `Sub`, `Mul`, and `Div`. + +Implementations: +```rust +impl Add for Field { .. } + +impl Add for i8 { .. } +impl Add for i16 { .. } +impl Add for i32 { .. } +impl Add for i64 { .. } + +impl Add for u8 { .. } +impl Add for u16 { .. } +impl Add for u32 { .. } +impl Add for u64 { .. } +``` diff --git a/docs/docs/explanations/noir/_category_.json b/docs/versioned_docs/version-v../getting_started/_category_.json similarity index 76% rename from docs/docs/explanations/noir/_category_.json rename to docs/versioned_docs/version-v../getting_started/_category_.json index 448d8987d1a..5d694210bbf 100644 --- a/docs/docs/explanations/noir/_category_.json +++ b/docs/versioned_docs/version-v../getting_started/_category_.json @@ -1,5 +1,4 @@ { - "label": "Noir", "position": 0, "collapsible": true, "collapsed": true diff --git a/docs/versioned_docs/version-v../getting_started/create_a_project.md b/docs/versioned_docs/version-v../getting_started/create_a_project.md new file mode 100644 index 00000000000..f10916c39c5 --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/create_a_project.md @@ -0,0 +1,142 @@ +--- +title: Creating A Project +description: + Learn how to create and verify your first Noir program using Nargo, a programming language for + zero-knowledge proofs. +keywords: + [ + Nargo, + Noir, + zero-knowledge proofs, + programming language, + create Noir program, + verify Noir program, + step-by-step guide, + ] +sidebar_position: 1 + +--- + +Now that we have installed Nargo, it is time to make our first hello world program! + +## Create a Project Directory + +Noir code can live anywhere on your computer. Let us create a _projects_ folder in the home +directory to house our Noir programs. + +For Linux, macOS, and Windows PowerShell, create the directory and change directory into it by +running: + +```sh +mkdir ~/projects +cd ~/projects +``` + +## Create Our First Nargo Project + +Now that we are in the projects directory, create a new Nargo project by running: + +```sh +nargo new hello_world +``` + +> **Note:** `hello_world` can be any arbitrary project name, we are simply using `hello_world` for +> demonstration. +> +> In production, the common practice is to name the project folder as `circuits` for better +> identifiability when sitting alongside other folders in the codebase (e.g. `contracts`, `scripts`, +> `test`). + +A `hello_world` folder would be created. Similar to Rust, the folder houses _src/main.nr_ and +_Nargo.toml_ that contains the source code and environmental options of your Noir program +respectively. + +### Intro to Noir Syntax + +Let us take a closer look at _main.nr_. The default _main.nr_ generated should look like this: + +```rust +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` + +The first line of the program specifies the program's inputs: + +```rust +x : Field, y : pub Field +``` + +Program inputs in Noir are private by default (e.g. `x`), but can be labeled public using the +keyword `pub` (e.g. `y`). To learn more about private and public values, check the +[Data Types](../noir/syntax/data_types/index.md) section. 
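+
+As a quick, hypothetical illustration (not part of the generated project), a circuit with two private inputs and one public input could look like this:
+
+```rust
+// `a` and `b` stay private to the prover; `expected_sum` is revealed to the verifier
+fn main(a: Field, b: Field, expected_sum: pub Field) {
+    assert(a + b == expected_sum);
+}
+```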
+ +The next line of the program specifies its body: + +```rust +assert(x != y); +``` + +The Noir syntax `assert` can be interpreted as something similar to constraints in other zk-contract languages. + +For more Noir syntax, check the [Language Concepts](../noir/syntax/comments.md) chapter. + +## Build In/Output Files + +Change directory into _hello_world_ and build in/output files for your Noir program by running: + +```sh +cd hello_world +nargo check +``` + +Two additional files would be generated in your project directory: + +_Prover.toml_ houses input values, and _Verifier.toml_ houses public values. + +## Prove Our Noir Program + +Now that the project is set up, we can create a proof of correct execution on our Noir program. + +Fill in input values for execution in the _Prover.toml_ file. For example: + +```toml +x = "1" +y = "2" +``` + +Prove the valid execution of your Noir program: + +```sh +nargo prove +``` + +A new folder _proofs_ would then be generated in your project directory, containing the proof file +`.proof`, where the project name is defined in Nargo.toml. + +The _Verifier.toml_ file would also be updated with the public values computed from program +execution (in this case the value of `y`): + +```toml +y = "0x0000000000000000000000000000000000000000000000000000000000000002" +``` + +> **Note:** Values in _Verifier.toml_ are computed as 32-byte hex values. + +## Verify Our Noir Program + +Once a proof is generated, we can verify correct execution of our Noir program by verifying the +proof file. + +Verify your proof by running: + +```sh +nargo verify +``` + +The verification will complete in silence if it is successful. If it fails, it will log the +corresponding error instead. + +Congratulations, you have now created and verified a proof for your very first Noir program! + +In the [next section](./project_breakdown.md), we will go into more detail on each step performed. diff --git a/docs/versioned_docs/version-v../getting_started/installation/_category_.json b/docs/versioned_docs/version-v../getting_started/installation/_category_.json new file mode 100644 index 00000000000..0c02fb5d4d7 --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/installation/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 0, + "label": "Install Nargo", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../getting_started/installation/index.md b/docs/versioned_docs/version-v../getting_started/installation/index.md new file mode 100644 index 00000000000..ddb8a250eb4 --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/installation/index.md @@ -0,0 +1,45 @@ +--- +title: Nargo Installation +description: + nargo is a command line tool for interacting with Noir programs. This page is a quick guide on how to install Nargo though the most common and easy method, noirup +keywords: [ + Nargo + Noir + Rust + Cargo + Noirup + Installation + Terminal Commands + Version Check + Nightlies + Specific Versions + Branches + Noirup Repository +] +--- + +`nargo` is the one-stop-shop for almost everything related with Noir. The name comes from our love for Rust and its package manager `cargo`. + +With `nargo`, you can start new projects, compile, execute, prove, verify, test, generate solidity contracts, and do pretty much all that is available in Noir. + +Similarly to `rustup`, we also maintain an easy installation method that covers most machines: `noirup`. 
+ +## Installing Noirup + +Open a terminal on your machine, and write: + +```bash +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Close the terminal, open another one, and run + +```bash +noirup +``` + +Done. That's it. You should have the latest version working. You can check with `nargo --version`. + +You can also install nightlies, specific versions +or branches, check out the [noirup repository](https://github.com/noir-lang/noirup) for more +information. diff --git a/docs/versioned_docs/version-v../getting_started/installation/other_install_methods.md b/docs/versioned_docs/version-v../getting_started/installation/other_install_methods.md new file mode 100644 index 00000000000..36f05657277 --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/installation/other_install_methods.md @@ -0,0 +1,190 @@ +--- +title: Alternative Install Methods +description: + There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains other methods that don't rely on noirup, such as compiling from source, installing from binaries, and using WSL for windows +keywords: [ + Installation + Nargo + Noirup + Binaries + Compiling from Source + WSL for Windows + macOS + Linux + Nix + Direnv + Shell & editor experience + Building and testing + Uninstalling Nargo + Noir vs code extension +] +sidebar_position: 1 +--- + + +## Installation + +The most common method of installing Nargo is through [Noirup](./index.md) + +However, there are other methods for installing Nargo: + +- [Binaries](#binaries) +- [Compiling from Source](#compile-from-source) +- [WSL for Windows](#wsl-for-windows) + +### Binaries + +See [GitHub Releases](https://github.com/noir-lang/noir/releases) for the latest and previous +platform specific binaries. + +#### Step 1 + +Paste and run the following in the terminal to extract and install the binary: + +> **macOS / Linux:** If you are prompted with `Permission denied` when running commands, prepend +> `sudo` and re-run it. + +##### macOS (Apple Silicon) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-aarch64-apple-darwin.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-aarch64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ +echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ +source ~/.zshrc +``` + +##### macOS (Intel) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-apple-darwin.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-x86_64-apple-darwin.tar.gz -C $HOME/.nargo/bin/ && \ +echo '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.zshrc && \ +source ~/.zshrc +``` + +##### Linux (Bash) + +```bash +mkdir -p $HOME/.nargo/bin && \ +curl -o $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -L https://github.com/noir-lang/noir/releases/download/v0.6.0/nargo-x86_64-unknown-linux-gnu.tar.gz && \ +tar -xvf $HOME/.nargo/bin/nargo-x86_64-unknown-linux-gnu.tar.gz -C $HOME/.nargo/bin/ && \ +echo -e '\nexport PATH=$PATH:$HOME/.nargo/bin' >> ~/.bashrc && \ +source ~/.bashrc +``` + +#### Step 2 + +Check if the installation was successful by running `nargo --version`. You should get a version number. + +> **macOS:** If you are prompted with an OS alert, right-click and open the _nargo_ executable from +> Finder. 
Close the new terminal that popped up, and `nargo` should now be accessible.
+
+### Compile from Source
+
+Due to the large number of native dependencies, Noir projects use [Nix](https://nixos.org/) and [direnv](https://direnv.net/) to streamline the development experience. It helps mitigate issues commonly associated with dependency management, such as conflicts between required package versions for different projects (often referred to as "dependency hell").
+
+Combined with direnv, which automatically sets or unsets environment variables based on the directory, it further simplifies the development process by seamlessly integrating with the developer's shell, facilitating an efficient and reliable workflow for managing and deploying Noir projects with multiple dependencies.
+
+#### Setting up your environment
+
+For the best experience, please follow these instructions to set up your environment:
+
+1. Install Nix following [their guide](https://nixos.org/download.html) for your operating system.
+2. Create the file `~/.config/nix/nix.conf` with the contents:
+
+```ini
+experimental-features = nix-command
+extra-experimental-features = flakes
+```
+
+3. Install direnv into your Nix profile by running:
+
+```sh
+nix profile install nixpkgs#direnv
+```
+
+4. Add direnv to your shell following [their guide](https://direnv.net/docs/hook.html).
+   1. For bash or zsh, add `eval "$(direnv hook bash)"` or `eval "$(direnv hook zsh)"` to your ~/.bashrc or ~/.zshrc file, respectively.
+5. Restart your shell.
+
+#### Shell & editor experience
+
+Now that your environment is set up, you can get to work on the project.
+
+1. Clone the repository, such as:
+
+```sh
+git clone git@github.com:noir-lang/noir
+```
+
+> Replacing `noir` with whichever repository you want to work on.
+
+2. Navigate to the directory:
+
+```sh
+cd noir
+```
+
+> Replacing `noir` with whichever repository you cloned.
+
+3. You should see a **direnv error** because projects aren't allowed by default. Make sure you've reviewed and trust our `.envrc` file, then you need to run:
+
+```sh
+direnv allow
+```
+
+4. Now, wait a while for all the native dependencies to be built. This will take some time and direnv will warn you that it is taking a long time, but we just need to let it run.
+
+5. Once you are presented with your prompt again, you can start your editor within the project directory (we recommend [VSCode](https://code.visualstudio.com/)):
+
+```sh
+code .
+```
+
+6. (Recommended) When launching VSCode for the first time, you should be prompted to install our recommended plugins. We highly recommend installing these for the best development experience.
+
+#### Building and testing
+
+Assuming you are using `direnv` to populate your environment, building and testing the project can be done
+with the typical `cargo build`, `cargo test`, and `cargo clippy` commands. You'll notice that the `cargo` version matches the version we specify in `rust-toolchain.toml`, which is 1.71.1 at the time of this writing.
+
+If you want to build the entire project in an isolated sandbox, you can use Nix commands:
+
+1. `nix build .` (or `nix build . -L` for verbose output) to build the project in a Nix sandbox.
+2. `nix flake check` (or `nix flake check -L` for verbose output) to run clippy and tests in a Nix sandbox.
+
+#### Without `direnv`
+
+If you have hesitations about using direnv, you can launch a subshell with `nix develop` and then launch your editor from within the subshell.
However, if VSCode was already launched in the project directory, the environment won't be updated.
+
+Advanced: If you aren't using direnv or launching your editor within the subshell, you can try to install Barretenberg and other global dependencies the package needs. This is an advanced workflow and likely won't receive support!
+
+### WSL (for Windows)
+
+The default backend for Noir (Barretenberg) doesn't provide Windows binaries at this time. For that reason, Noir cannot be installed natively. However, it is available by using Windows Subsystem for Linux (WSL).
+
+Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL.
+
+Step 2: Follow the [Noirup instructions](./index.md).
+
+## Uninstalling Nargo
+
+### Noirup
+
+If you installed Noir with `noirup`, you can uninstall Noir by removing the files in `~/.nargo`, `~/nargo` and `~/noir_cache`.
+
+```bash
+rm -r ~/.nargo
+rm -r ~/nargo
+rm -r ~/noir_cache
+```
+
+### Nix
+
+If you installed Noir with Nix or from source, you can remove the binary located at `~/.nix-profile/bin/nargo`.
+
+```bash
+rm ~/.nix-profile/bin/nargo
+```
diff --git a/docs/versioned_docs/version-v../getting_started/project_breakdown.md b/docs/versioned_docs/version-v../getting_started/project_breakdown.md
new file mode 100644
index 00000000000..5a214804f7b
--- /dev/null
+++ b/docs/versioned_docs/version-v../getting_started/project_breakdown.md
@@ -0,0 +1,199 @@
+---
+title: Project Breakdown
+description:
+  Learn about the anatomy of a Nargo project, including the purpose of the Prover and Verifier TOML
+  files, and how to prove and verify your program.
+keywords:
+  [Nargo, Nargo project, Prover.toml, Verifier.toml, proof verification, private asset transfer]
+sidebar_position: 2
+---
+
+This section breaks down our hello world program in section _1.2_. We elaborate on the project
+structure and what the `prove` and `verify` commands did in the previous section.
+
+## Anatomy of a Nargo Project
+
+Upon creating a new project with `nargo new` and building the in/output files with `nargo check`
+commands, you would get a minimal Nargo project of the following structure:
+
+  - src
+  - Prover.toml
+  - Verifier.toml
+  - Nargo.toml
+
+The source directory _src_ holds the source code for your Noir program. By default only a _main.nr_
+file will be generated within it.
+
+### Prover.toml
+
+_Prover.toml_ is used for specifying the input values for executing and proving the program. You can specify `toml` files with different names by using the `--prover-name` or `-p` flags; see the [Prover](#provertoml) section below. Optionally you may specify expected output values for prove-time checking as well.
+
+### Verifier.toml
+
+_Verifier.toml_ contains public in/output values computed when executing the Noir program.
+
+### Nargo.toml
+
+_Nargo.toml_ contains the environmental options of your project. It contains a "package" section and a "dependencies" section.
+
+Example Nargo.toml:
+
+```toml
+[package]
+name = "noirstarter"
+type = "bin"
+authors = ["Alice"]
+compiler_version = "0.9.0"
+description = "Getting started with Noir"
+entry = "circuit/main.nr"
+license = "MIT"
+
+[dependencies]
+ecrecover = {tag = "v0.9.0", git = "https://github.com/colinnielsen/ecrecover-noir.git"}
+```
+
+Nargo.toml for a [workspace](../noir/modules_packages_crates/workspaces.md) will look a bit different.
For example:
+
+```toml
+[workspace]
+members = ["crates/a", "crates/b"]
+default-member = "crates/a"
+```
+
+#### Package section
+
+The package section requires a number of fields including:
+
+- `name` (**required**) - the name of the package
+- `type` (**required**) - can be "bin", "lib", or "contract" to specify whether it's a binary, library or Aztec contract
+- `authors` (optional) - authors of the project
+- `compiler_version` - specifies the version of the compiler to use. This is enforced by the compiler and follows [Rust's versioning](https://doc.rust-lang.org/cargo/reference/manifest.html#the-version-field), so a `compiler_version = 0.18.0` will enforce Nargo version 0.18.0, `compiler_version = ^0.18.0` will enforce anything above 0.18.0 but below 0.19.0, etc. For more information, see how [Rust handles these operators](https://docs.rs/semver/latest/semver/enum.Op.html)
+- `description` (optional)
+- `entry` (optional) - a relative filepath to use as the entry point into your package (overrides the default of `src/lib.nr` or `src/main.nr`)
+- `backend` (optional)
+- `license` (optional)
+
+#### Dependencies section
+
+This is where you will specify any dependencies for your project. See the [Dependencies page](../noir/modules_packages_crates/dependencies.md) for more info.
+
+`./proofs/` and `./contract/` directories will not be immediately visible until you create a proof or
+verifier contract respectively.
+
+### main.nr
+
+The _main.nr_ file contains a `main` method; this method is the entry point into your Noir program.
+
+In our sample program, _main.nr_ looks like this:
+
+```rust
+fn main(x : Field, y : Field) {
+    assert(x != y);
+}
+```
+
+The parameters `x` and `y` can be seen as the API for the program and must be supplied by the
+prover. Since neither `x` nor `y` is marked as public, the verifier does not supply any inputs when
+verifying the proof.
+
+The prover supplies the values for `x` and `y` in the _Prover.toml_ file.
+
+As for the program body, `assert` ensures that the satisfaction of the condition (e.g. `x != y`) is
+constrained by the proof of the execution of said program (i.e. if the condition was not met, the
+verifier would reject the proof as an invalid proof).
+
+### Prover.toml
+
+The _Prover.toml_ file is where the prover supplies their witness values (both private and
+public).
+
+In our hello world program the _Prover.toml_ file looks like this:
+
+```toml
+x = "1"
+y = "2"
+```
+
+When the command `nargo prove` is executed, two processes happen:
+
+1. Noir creates a proof that `x`, which holds the value of `1`, and `y`, which holds the value of `2`,
+   are not equal. This not-equal constraint is due to the line `assert(x != y)`.
+
+2. Noir creates and stores the proof of this statement in the _proofs_ directory in a file called your-project.proof. So if your project is named "private_voting" (defined in the project Nargo.toml), the proof will be saved at `./proofs/private_voting.proof`. Opening this file will display the proof in hex format.
+
+#### Arrays of Structs
+
+The following code shows how to pass an array of structs to a Noir program to generate a proof.
+ +```rust +// main.nr +struct Foo { + bar: Field, + baz: Field, +} + +fn main(foos: [Foo; 3]) -> pub Field { + foos[2].bar + foos[2].baz +} +``` + +Prover.toml: + +```toml +[[foos]] # foos[0] +bar = 0 +baz = 0 + +[[foos]] # foos[1] +bar = 0 +baz = 0 + +[[foos]] # foos[2] +bar = 1 +baz = 2 +``` + +#### Custom toml files + +You can specify a `toml` file with a different name to use for proving by using the `--prover-name` or `-p` flags. + +This command looks for proof inputs in the default **Prover.toml** and generates the proof and saves it at `./proofs/.proof`: + +```bash +nargo prove +``` + +This command looks for proof inputs in the custom **OtherProver.toml** and generates proof and saves it at `./proofs/.proof`: + +```bash +nargo prove -p OtherProver +``` + +## Verifying a Proof + +When the command `nargo verify` is executed, two processes happen: + +1. Noir checks in the _proofs_ directory for a proof file with the project name (eg. test_project.proof) + +2. If that file is found, the proof's validity is checked + +> **Note:** The validity of the proof is linked to the current Noir program; if the program is +> changed and the verifier verifies the proof, it will fail because the proof is not valid for the +> _modified_ Noir program. + +In production, the prover and the verifier are usually two separate entities. A prover would +retrieve the necessary inputs, execute the Noir program, generate a proof and pass it to the +verifier. The verifier would then retrieve the public inputs from usually external sources and +verifies the validity of the proof against it. + +Take a private asset transfer as an example: + +A user on browser as the prover would retrieve private inputs (e.g. the user's private key) and +public inputs (e.g. the user's encrypted balance on-chain), compute the transfer, generate a proof +and submit it to the verifier smart contract. + +The verifier contract would then draw the user's encrypted balance directly from the blockchain and +verify the proof submitted against it. If the verification passes, additional functions in the +verifier contract could trigger (e.g. approve the asset transfer). + +Now that you understand the concepts, you'll probably want some editor feedback while you are writing more complex code. diff --git a/docs/docs/explanations/_category_.json b/docs/versioned_docs/version-v../getting_started/tooling/_category_.json similarity index 74% rename from docs/docs/explanations/_category_.json rename to docs/versioned_docs/version-v../getting_started/tooling/_category_.json index 151ee204894..dff520ebc41 100644 --- a/docs/docs/explanations/_category_.json +++ b/docs/versioned_docs/version-v../getting_started/tooling/_category_.json @@ -1,5 +1,6 @@ { "position": 3, + "label": "Tooling", "collapsible": true, "collapsed": true } diff --git a/docs/versioned_docs/version-v../getting_started/tooling/index.md b/docs/versioned_docs/version-v../getting_started/tooling/index.md new file mode 100644 index 00000000000..55df833005a --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/tooling/index.md @@ -0,0 +1,33 @@ +--- +title: Tooling +Description: This section provides information about the various tools and utilities available for Noir development. It covers the Noir playground, IDE tools, Codespaces, and community projects. 
+Keywords: [Noir, Development, Playground, IDE Tools, Language Service Provider, VS Code Extension, Codespaces, noir-starter, Community Projects, Awesome Noir Repository, Developer Tooling] +--- + +Noir is meant to be easy to develop with. For that reason, a number of utilities have been put together to ease the development process as much as feasible in the zero-knowledge world. + +## Playground + +The Noir playground is an easy way to test small ideas, share snippets, and integrate in other websites. You can access it at [play.noir-lang.org](https://play.noir-lang.org). + +## IDE tools + +When you install Nargo, you're also installing a Language Service Provider (LSP), which can be used by IDEs to provide syntax highlighting, codelens, warnings, and more. + +The easiest way to use these tools is by installing the [Noir VS Code extension](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). + +## Codespaces + +Some Noir repos have leveraged Codespaces in order to ease the development process. You can visit the [noir-starter](https://github.com/noir-lang/noir-starter) for an example. + +## GitHub Actions + +You can use `noirup` with GitHub Actions for CI/CD and automated testing. It is as simple as +installing `noirup` and running tests in your GitHub Action `yml` file. + +See the +[config file in the Noir repo](https://github.com/TomAFrench/noir-hashes/blob/master/.github/workflows/noir.yml) for an example usage. + +## Community projects + +As an open-source project, Noir has received many contributions over time. Some of them are related with developer tooling, and you can see some of them in [Awesome Noir repository](https://github.com/noir-lang/awesome-noir#dev-tools) diff --git a/docs/versioned_docs/version-v../getting_started/tooling/language_server.md b/docs/versioned_docs/version-v../getting_started/tooling/language_server.md new file mode 100644 index 00000000000..81e0356ef8a --- /dev/null +++ b/docs/versioned_docs/version-v../getting_started/tooling/language_server.md @@ -0,0 +1,43 @@ +--- +title: Language Server +description: Learn about the Noir Language Server, how to install the components, and configuration that may be required. +keywords: [Nargo, Language Server, LSP, VSCode, Visual Studio Code] +sidebar_position: 0 +--- + +This section helps you install and configure the Noir Language Server. + +The Language Server Protocol (LSP) has two components, the [Server](#language-server) and the [Client](#language-client). Below we describe each in the context of Noir. + +## Language Server + +The Server component is provided by the Nargo command line tool that you installed at the beginning of this guide. +As long as Nargo is installed and you've used it to run other commands in this guide, it should be good to go! + +If you'd like to verify that the `nargo lsp` command is available, you can run `nargo --help` and look for `lsp` in the list of commands. If you see it, you're using a version of Noir with LSP support. + +## Language Client + +The Client component is usually an editor plugin that launches the Server. It communicates LSP messages between the editor and the Server. For example, when you save a file, the Client will alert the Server, so it can try to compile the project and report any errors. + +Currently, Noir provides a Language Client for Visual Studio Code via the [vscode-noir](https://github.com/noir-lang/vscode-noir) extension. 
You can install it via the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir).
+
+> **Note:** Noir's Language Server Protocol support currently assumes users' VSCode workspace root to be the same as users' Noir project root (i.e. where Nargo.toml lies).
+>
+> If LSP features seem to be missing / malfunctioning, make sure you are opening your Noir project directly (instead of as a sub-folder) in your VSCode instance.
+
+When your language server is running correctly and the VSCode plugin is installed, you should see handy codelens buttons for compilation, measuring circuit size, execution, and tests:
+
+![Compile and Execute](@site/static/img/codelens_compile_execute.png)
+![Run test](@site/static/img/codelens_run_test.png)
+
+You should also see your tests in the `testing` panel:
+
+![Testing panel](@site/static/img/codelens_testing_panel.png)
+
+### Configuration
+
+- **Noir: Enable LSP** - If checked, the extension will launch the Language Server via `nargo lsp` and communicate with it.
+- **Noir: Nargo Flags** - Additional flags may be specified if you require them to be added when the extension calls `nargo lsp`.
+- **Noir: Nargo Path** - An absolute path to a Nargo binary with the `lsp` command. This may be useful if Nargo is not within the `PATH` of your editor.
+- **Noir > Trace: Server** - Setting this to `"messages"` or `"verbose"` will log LSP messages between the Client and Server. Useful for debugging.
diff --git a/docs/versioned_docs/version-v../getting_started/tooling/testing.md b/docs/versioned_docs/version-v../getting_started/tooling/testing.md
new file mode 100644
index 00000000000..868a061200d
--- /dev/null
+++ b/docs/versioned_docs/version-v../getting_started/tooling/testing.md
@@ -0,0 +1,62 @@
+---
+title: Testing in Noir
+description: Learn how to use Nargo to test your Noir program in a quick and easy way
+keywords: [Nargo, testing, Noir, compile, test]
+sidebar_position: 1
+---
+
+You can test your Noir programs by writing test functions in Noir itself.
+
+Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if
+you run `nargo test`.
+
+For example, if you have a program like:
+
+```rust
+fn add(x: u64, y: u64) -> u64 {
+    x + y
+}
+#[test]
+fn test_add() {
+    assert(add(2,2) == 4);
+    assert(add(0,1) == 1);
+    assert(add(1,0) == 1);
+}
+```
+
+Running `nargo test` will test that the `test_add` function can be executed while satisfying all
+the constraints, which allows you to test that `add` returns the expected values. Test functions can't
+have any arguments currently.
+
+### Test fail
+
+You can write tests that are expected to fail by using the decorator `#[test(should_fail)]`.
For example: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test(should_fail)] +fn test_add() { + assert(add(2,2) == 5); +} +``` + +You can be more specific and make it fail with a specific reason by using `should_fail_with = "`: + +```rust +fn main(african_swallow_avg_speed : Field) { + assert(african_swallow_avg_speed == 65, "What is the airspeed velocity of an unladen swallow"); +} + +#[test] +fn test_king_arthur() { + main(65); +} + +#[test(should_fail_with = "What is the airspeed velocity of an unladen swallow")] +fn test_bridgekeeper() { + main(32); +} + +``` diff --git a/docs/versioned_docs/version-v../how_to/_category_.json b/docs/versioned_docs/version-v../how_to/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v../how_to/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../how_to/how-to-recursion.md b/docs/versioned_docs/version-v../how_to/how-to-recursion.md new file mode 100644 index 00000000000..226f7e6e73d --- /dev/null +++ b/docs/versioned_docs/version-v../how_to/how-to-recursion.md @@ -0,0 +1,184 @@ +--- +title: How to use recursion on NoirJS +description: Learn how to implement recursion with NoirJS, a powerful tool for creating smart contracts on the EVM blockchain. This guide assumes familiarity with NoirJS, solidity verifiers, and the Barretenberg proving backend. Discover how to generate both final and intermediate proofs using `noir_js` and `backend_barretenberg`. +keywords: + [ + "NoirJS", + "EVM blockchain", + "smart contracts", + "recursion", + "solidity verifiers", + "Barretenberg backend", + "noir_js", + "backend_barretenberg", + "intermediate proofs", + "final proofs", + "nargo compile", + "json import", + "recursive circuit", + "recursive app" + ] +sidebar_position: 1 +--- + +This guide shows you how to use recursive proofs in your NoirJS app. For the sake of clarity, it is assumed that: + +- You already have a NoirJS app. If you don't, please visit the [NoirJS tutorial](../tutorials/noirjs_app.md) and the [reference](../reference/NoirJS/noir_js/index.md). +- You are familiar with what are recursive proofs and you have read the [recursion explainer](../explainers/explainer-recursion.md) +- You already built a recursive circuit following [the reference](../noir/standard_library/recursion.md), and understand how it works. + +It is also assumed that you're not using `noir_wasm` for compilation, and instead you've used [`nargo compile`](../reference/nargo_commands.md) to generate the `json` you're now importing into your project. However, the guide should work just the same if you're using `noir_wasm`. + +:::info + +As you've read in the [explainer](../explainers/explainer-recursion.md), a recursive proof is an intermediate proof. Meaning it doesn't necessarily generate the final step that makes it verifiable in a smart contract. However, it is easy to verify within another circuit. + +While "standard" usage of NoirJS packages abstracts final proofs, it currently lacks the necessary interface to abstract away intermediate proofs. Which means these proofs need to be created by using the backend directly. + +In short: + +- `noir_js` generates *only* final proofs +- `backend_barretenberg` generates both types of proofs + +::: + +In a standard recursive app, you're also dealing with at least two circuits. 
For the purpose of this guide, we will assume these two: + +- `main`: a circuit of type `assert(x != y)` +- `recursive`: a circuit that verifies `main` + +For a full example on how recursive proofs work, please refer to the [noir-examples](https://github.com/noir/noir-examples) repository. We will *not* be using it as a reference for this guide. + +## Step 1: Setup + +In a common NoirJS app, you need to instantiate a backend with something like `const backend = new Backend(circuit)`. Then you feed it to the `noir_js` interface. + +For recursiveness, this doesn't happen, and the only need for `noir_js` is only to `execute` a circuit and get its witness and return value. Everything else is not interfaced, so it needs to happen on the `backend` object. + +It is also recommended that you instantiate the backend with as many threads as possible, to allow for maximum concurrency: + +```js +const backend = new Backend(circuit, { threads: 8 }) +``` + +:::tip +You can use the [`os.cpus()`](https://nodejs.org/api/os.html#oscpus) object in `nodejs` or [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) on the browser to make the most out of those glorious cpu cores +::: + +## Step 2: Generating the witness and the proof for `main` + +After instantiating the backend, you should also instantiate `noir_js`. We will use it to execute the circuit and get the witness. + +```js +const noir = new Noir(circuit, backend) +const { witness } = noir.execute(input) +``` + +With this witness, you are now able to generate the intermediate proof for the main circuit: + +```js +const { proof, publicInputs } = await backend.generateIntermediateProof(witness) +``` + +:::warning + +Always keep in mind what is actually happening on your development process, otherwise you'll quickly become confused about what circuit are we actually running and why! + +In this case, you can imagine that Alice (running the `main` circuit) is proving something to Bob (running the `recursive` circuit), and Bob is verifying her proof within his proof. + +With this in mind, it becomes clear that our intermediate proof is the one *meant to be verified within another circuit*. So it is Alice's. Actually, the only final proof in this theoretical scenario would be the last one, sent on-chain. + +::: + +## Step 3 - Verification and proof artifacts + +Optionally, you are able to verify the intermediate proof: + +```js +const verified = await backend.verifyIntermediateProof({ proof, publicInputs }) +``` + +This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate the intermediate artifacts: + +```js +const { proofAsFields, vkAsFields, vkHash } = await backend.generateIntermediateProofArtifacts( { publicInputs, proof }, publicInputsCount) +``` + +This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away. + +:::info + +The `proofAsFields` has a constant size `[Field; 93]`. However, currently the backend doesn't remove the public inputs from the proof when converting it. + +This means that if your `main` circuit has two public inputs, then you should also modify the recursive circuit to accept a proof with the public inputs appended. 
This means that in our example, since `y` is a public input, our `proofAsFields` is of type `[Field; 94]`. + +Verification keys in Barretenberg are always of size 114. + +::: + +:::warning + +One common mistake is to forget *who* makes this call. + +In a situation where Alice is generating the `main` proof, if she generates the proof artifacts and sends them to Bob, which gladly takes them as true, this would mean Alice could prove anything! + +Instead, Bob needs to make sure *he* extracts the proof artifacts, using his own instance of the `main` circuit backend. This way, Alice has to provide a valid proof for the correct `main` circuit. + +::: + +## Step 4 - Recursive proof generation + +With the artifacts, generating a recursive proof is no different from a normal proof. You simply use the `backend` (with the recursive circuit) to generate it: + +```js +const recursiveInputs = { + verification_key: vkAsFields, // array of length 114 + proof: proofAsFields, // array of length 93 + size of public inputs + publicInputs: [mainInput.y], // using the example above, where `y` is the only public input + key_hash: vkHash, + input_aggregation_object: Array(16).fill(0) // this circuit is verifying a non-recursive proof, so there's no input aggregation object: just use zero +} + +const { witness, returnValue } = noir.execute(recursiveInputs) // we're executing the recursive circuit now! +const { proof, publicInputs } = backend.generateFinalProof(witness) +const verified = backend.verifyFinalProof({ proof, publicInputs }) +``` + +You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested of using them this way! In that case, you should keep in mind the `returnValue`, as it will contain the `input_aggregation_object` for the next proof. + +:::tip + +Managing circuits and "who does what" can be confusing. To make sure your naming is consistent, you can keep them in an object. For example: + +```js +const circuits = { +main: mainJSON, +recursive: recursiveJSON +} +const backends = { +main: new BarretenbergBackend(circuits.main), +recursive: new BarretenbergBackend(circuits.recursive) +} +const noirs = { +main: new Noir(circuits.main, backends.main), +recursive: new Noir(circuits.recursive, backends.recursive) +} +``` + +This allows you to neatly call exactly the method you want without conflicting names: + +```js +// Alice runs this 👇 +const { witness: mainWitness } = await noirs.main.execute(input) +const proof = await backends.main.generateIntermediateProof(mainWitness) + +// Bob runs this 👇 +const verified = await backends.main.verifyIntermediateProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateIntermediateProofArtifacts( + proof, + numPublicInputs, +); +const recursiveProof = await noirs.recursive.generateFinalProof(recursiveInputs) +``` + +::: diff --git a/docs/versioned_docs/version-v../how_to/merkle-proof.mdx b/docs/versioned_docs/version-v../how_to/merkle-proof.mdx new file mode 100644 index 00000000000..34074659ac1 --- /dev/null +++ b/docs/versioned_docs/version-v../how_to/merkle-proof.mdx @@ -0,0 +1,48 @@ +--- +title: Prove Merkle Tree Membership +description: + Learn how to use merkle membership proof in Noir to prove that a given leaf is a member of a + merkle tree with a specified root, at a given index. 
+keywords: + [merkle proof, merkle membership proof, Noir, rust, hash function, Pedersen, sha256, merkle tree] +--- + +Let's walk through an example of a merkle membership proof in Noir that proves that a given leaf is +in a merkle tree. + +```rust +use dep::std; + +fn main(message : [Field; 62], index : Field, hashpath : [Field; 40], root : Field) { + let leaf = std::hash::hash_to_field(message); + let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); + assert(merkle_root == root); +} + +``` + +The message is hashed using `hash_to_field`. The specific hash function that is being used is chosen +by the backend. The only requirement is that this hash function can heuristically be used as a +random oracle. If only collision resistance is needed, then one can call `std::hash::pedersen_hash` +instead. + +```rust +let leaf = std::hash::hash_to_field(message); +``` + +The leaf is then passed to a compute_merkle_root function with the root, index and hashpath. The returned root can then be asserted to be the same as the provided root. + +```rust +let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); +assert (merkle_root == root); +``` + +> **Note:** It is possible to re-implement the merkle tree implementation without standard library. +> However, for most usecases, it is enough. In general, the standard library will always opt to be +> as conservative as possible, while striking a balance with efficiency. + +An example, the merkle membership proof, only requires a hash function that has collision +resistance, hence a hash function like Pedersen is allowed, which in most cases is more efficient +than the even more conservative sha256. + +[View an example on the starter repo](https://github.com/noir-lang/noir-examples/blob/3ea09545cabfa464124ec2f3ea8e60c608abe6df/stealthdrop/circuits/src/main.nr#L20) diff --git a/docs/versioned_docs/version-v../how_to/solidity_verifier.md b/docs/versioned_docs/version-v../how_to/solidity_verifier.md new file mode 100644 index 00000000000..8022b0e5f20 --- /dev/null +++ b/docs/versioned_docs/version-v../how_to/solidity_verifier.md @@ -0,0 +1,130 @@ +--- +title: Generate a Solidity Verifier +description: + Learn how to run the verifier as a smart contract on the blockchain. Compile a Solidity verifier + contract for your Noir program and deploy it on any EVM blockchain acting as a verifier smart + contract. Read more to find out +keywords: + [ + solidity verifier, + smart contract, + blockchain, + compiler, + plonk_vk.sol, + EVM blockchain, + verifying Noir programs, + proving backend, + Barretenberg, + ] +sidebar_position: 0 +--- + +For certain applications, it may be desirable to run the verifier as a smart contract instead of on +a local machine. + +Compile a Solidity verifier contract for your Noir program by running: + +```sh +nargo codegen-verifier +``` + +A new `contract` folder would then be generated in your project directory, containing the Solidity +file `plonk_vk.sol`. It can be deployed on any EVM blockchain acting as a verifier smart contract. + +> **Note:** It is possible to compile verifier contracts of Noir programs for other smart contract +> platforms as long as the proving backend supplies an implementation. +> +> Barretenberg, the default proving backend for Nargo, supports compilation of verifier contracts in +> Solidity only for the time being. 
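+
+For concreteness, the command above can be run from any Nargo project. A minimal, hypothetical circuit (one private input `x`, one public input `y`) whose verifier contract could be generated this way might look like:
+
+```rust
+// Hypothetical example circuit. After `nargo codegen-verifier`, the generated
+// contract's `verify` function expects the public input `y` (and the return
+// value, if any) in its `_publicInputs` array (see the sections below).
+fn main(x: Field, y: pub Field) {
+    assert(x != y);
+}
+```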
+ +## Verify + +To verify a proof using the Solidity verifier contract, call the `verify` function with the +following signature: + +```solidity +function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool) +``` + +You can see an example of how the `verify` function is called in the example zk voting application [here](https://github.com/noir-lang/noir-examples/blob/33e598c257e2402ea3a6b68dd4c5ad492bce1b0a/foundry-voting/src/zkVote.sol#L35): + +```solidity +function castVote(bytes calldata proof, uint proposalId, uint vote, bytes32 nullifierHash) public returns (bool) { + // ... + bytes32[] memory publicInputs = new bytes32[](4); + publicInputs[0] = merkleRoot; + publicInputs[1] = bytes32(proposalId); + publicInputs[2] = bytes32(vote); + publicInputs[3] = nullifierHash; + require(verifier.verify(proof, publicInputs), "Invalid proof"); +``` + +### Public Inputs + +:::tip + +A circuit doesn't have the concept of a return value. Return values are just syntactic sugar in +Noir. + +Under the hood, the return value is passed as an input to the circuit and is checked at the end of +the circuit program. + +::: + +The verifier contract uses the output (return) value of a Noir program as a public input. So if you +have the following function + +```rust +fn main( + // Public inputs + pubkey_x: pub Field, + pubkey_y: pub Field, + // Private inputs + priv_key: Field, +) -> pub Field +``` + +then `verify` in `plonk_vk.sol` will expect 3 public inputs. Passing two inputs will result in an +error like `Reason: PUBLIC_INPUT_COUNT_INVALID(3, 2)`. + +In this case the 3 inputs to `verify` would be ordered as `[pubkey_x, pubkey_y, return]`. + +#### Struct inputs + +Consider the following program: + +```rust +struct Type1 { + val1: Field, + val2: Field, +} + +struct Nested { + t1: Type1, + is_true: bool, +} + +fn main(x: pub Field, nested: pub Nested, y: pub Field) { + //... +} +``` + +Structs will be flattened so that the array of inputs is 1-dimensional array. The order of these inputs would be flattened to: `[x, nested.t1.val1, nested.t1.val2, nested.is_true, y]` + +## Noir for EVM chains + +You can currently deploy the Solidity verifier contracts to most EVM compatible chains. EVM chains that have been tested and are known to work include: + +- Optimism +- Arbitrum +- Polygon PoS +- Scroll +- Celo + +Other EVM chains should work, but have not been tested directly by our team. If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. + +### Unsupported chains + +Unfortunately not all "EVM" chains are supported. + +**zkSync** and the **Polygon zkEVM** do _not_ currently support proof verification via Solidity verifier contracts. They are missing the bn256 precompile contract that the verifier contract requires. Once these chains support this precompile, they may work. diff --git a/docs/versioned_docs/version-v../index.md b/docs/versioned_docs/version-v../index.md new file mode 100644 index 00000000000..016832f9f5e --- /dev/null +++ b/docs/versioned_docs/version-v../index.md @@ -0,0 +1,85 @@ +--- +title: Noir +description: + Learn about the public alpha release of Noir, a domain specific language heavily influenced by Rust that compiles to + an intermediate language which can be compiled to an arithmetic circuit or a rank-1 constraint system. 
+keywords:
+  [
+    Noir,
+    Domain Specific Language,
+    Rust,
+    Intermediate Language,
+    Arithmetic Circuit,
+    Rank-1 Constraint System,
+    Ethereum Developers,
+    Protocol Developers,
+    Blockchain Developers,
+    Proving System,
+    Smart Contract Language,
+  ]
+sidebar_position: 0
+---
+
+## What's new about Noir?
+
+Noir, a domain-specific language crafted for SNARK proving systems, stands out with its simplicity, flexibility,
+and robust capabilities. Unlike conventional approaches that compile directly to a fixed NP-complete language,
+Noir takes a two-pronged path. It first compiles to an adaptable intermediate language known as ACIR. From there,
+depending on the project's needs, ACIR can be further compiled into an arithmetic circuit for integration with Aztec's
+barretenberg backend or transformed into a rank-1 constraint system suitable for R1CS backends like Arkworks' Marlin
+backend, among others.
+
+This innovative design introduces unique challenges, yet it strategically separates the programming language from the
+backend. Noir's approach echoes the modular philosophy of LLVM, offering developers a versatile toolkit for cryptographic
+programming.
+
+## Who is Noir for?
+
+### Solidity Developers
+
+Noir streamlines the creation of Solidity contracts that interface with SNARK systems.
+Utilize the [`nargo codegen-verifier`](./reference/nargo_commands.md#nargo-codegen-verifier) command to construct verifier
+contracts efficiently. While the current alpha version offers this as a direct feature, future updates aim
+to modularize this process for even greater ease of use.
+
+Noir currently includes a command to create a Solidity contract which verifies your Noir program. This will be
+modularised in the future; however, as of the alpha, you can use the `nargo codegen-verifier` command to create a verifier contract.
+
+### Protocol Developers
+
+Should the Aztec backend not align with your existing tech stack, or if you're inclined to integrate alternative
+proving systems, Noir's compilation to a proof-agnostic intermediate language offers unmatched flexibility.
+This allows protocol engineers the freedom to substitute the default PLONK-based system with an alternative of their
+choice, tailoring the proving system to their specific needs.
+
+### Blockchain developers
+
+Blockchain developers often face environmental constraints, such as predetermined proving systems and smart contract
+languages. Noir addresses this by enabling the implementation of custom proving system backends and smart contract
+interfaces, ensuring seamless integration with your blockchain's architecture, and expanding the horizons for innovation
+within your projects.
+
+## Libraries
+
+Noir does not currently have an official package manager. You can find a list of available Noir libraries in the
+[awesome-noir repo here](https://github.com/noir-lang/awesome-noir#libraries).
+ +Some libraries that are available today include: + +- [Standard Library](https://github.com/noir-lang/noir/tree/master/noir_stdlib) - the Noir Standard Library +- [Ethereum Storage Proof Verification](https://github.com/aragonzkresearch/noir-trie-proofs) - a library that contains + the primitives necessary for RLP decoding (in the form of look-up table construction) and Ethereum state and storage + proof verification (or verification of any trie proof involving 32-byte long keys) +- [BigInt](https://github.com/shuklaayush/noir-bigint) - a library that provides a custom BigUint56 data type, allowing + for computations on large unsigned integers +- [ECrecover](https://github.com/colinnielsen/ecrecover-noir/tree/main) - a library to verify an ECDSA signature and + return the source Ethereum address +- [Sparse Merkle Tree Verifier](https://github.com/vocdoni/smtverifier-noir/tree/main) - a library for verification of + sparse Merkle trees +- [Signed Int](https://github.com/resurgencelabs/signed_int) - a library for accessing a custom Signed Integer data + type, allowing access to negative numbers on Noir +- [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, + allowing results that aren't whole numbers + +See the section on [dependencies](noir/modules_packages_crates/dependencies.md) for more information. diff --git a/docs/versioned_docs/version-v../migration_notes.md b/docs/versioned_docs/version-v../migration_notes.md new file mode 100644 index 00000000000..d5d0682cf0c --- /dev/null +++ b/docs/versioned_docs/version-v../migration_notes.md @@ -0,0 +1,91 @@ +--- +title: Migration notes +description: Read about migration notes from previous versions, which could solve problems while updating +keywords: [Noir, notes, migration, updating, upgrading] +--- + +Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. + +## ≥0.19 + +### Enforcing `compiler_version` + +From this version on, the compiler will check for the `compiler_version` field in `Nargo.toml`, and will error if it doesn't match the current Nargo version in use. + +To update, please make sure this field in `Nargo.toml` matches the output of `nargo --version`. + +## ≥0.14 + +The index of the [for loops](noir/syntax/control_flow.md#loops) is now of type `u64` instead of `Field`. An example refactor would be: + +```rust +for i in 0..10 { + let i = i as Field; +} +``` + +## ≥v0.11.0 and Nargo backend + +From this version onwards, Nargo starts managing backends through the `nargo backend` command. Upgrading to the versions per usual steps might lead to: + +### `backend encountered an error` + +This is likely due to the existing locally installed version of proving backend (e.g. barretenberg) is incompatible with the version of Nargo in use. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. + +If you are using the default barretenberg backend, simply run: + +``` +nargo prove +``` + +with you Noir program. + +This will trigger the download and installation of the latest version of barretenberg compatible with your Nargo in use. 
+
+### `backend encountered an error: illegal instruction`
+
+On certain Intel-based systems, an `illegal instruction` error may arise due to incompatibility of barretenberg with certain CPU instructions.
+
+To fix the issue:
+
+1. Uninstall the existing backend
+
+```bash
+nargo backend uninstall acvm-backend-barretenberg
+```
+
+You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends.
+
+2. Reinstall a compatible version of the proving backend.
+
+If you are using the default barretenberg backend, simply run:
+
+```
+nargo backend install acvm-backend-barretenberg https://github.com/noir-lang/barretenberg-js-binary/raw/master/run-bb.tar.gz
+```
+
+This downloads and installs a specific bb.js-based version of the barretenberg binary from GitHub.
+
+The gzipped file runs [this bash script](https://github.com/noir-lang/barretenberg-js-binary/blob/master/run-bb-js.sh); it needs to be gzipped because Nargo currently expects the backend to be zipped up.
+
+Then run:
+
+```
+DESIRED_BINARY_VERSION=0.8.1 nargo info
+```
+
+This overrides the bb native binary with a bb.js node application instead, which should be compatible with most if not all hardware. This does come with the drawback of being generally slower than the native binary.
+
+0.8.1 indicates bb.js version 0.8.1, so if you change that, it will update to a different version (or the default version in the script if none was supplied).
diff --git a/docs/versioned_docs/version-v../noir/modules_packages_crates/_category_.json b/docs/versioned_docs/version-v../noir/modules_packages_crates/_category_.json
new file mode 100644
index 00000000000..1debcfe7675
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/modules_packages_crates/_category_.json
@@ -0,0 +1,6 @@
+{
+  "label": "Modules, Packages and Crates",
+  "position": 2,
+  "collapsible": true,
+  "collapsed": true
+}
diff --git a/docs/versioned_docs/version-v../noir/modules_packages_crates/crates_and_packages.md b/docs/versioned_docs/version-v../noir/modules_packages_crates/crates_and_packages.md
new file mode 100644
index 00000000000..aae6795b229
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/modules_packages_crates/crates_and_packages.md
@@ -0,0 +1,43 @@
+---
+title: Crates and Packages
+description: Learn how to use Crates and Packages in your Noir project
+keywords: [Nargo, dependencies, package management, crates, package]
+sidebar_position: 0
+---
+
+## Crates
+
+A crate is the smallest amount of code that the Noir compiler considers at a time.
+Crates can contain modules, and the modules may be defined in other files that get compiled with the crate, as we’ll see in the coming sections.
+
+### Crate Types
+
+A Noir crate can come in several forms: binaries, libraries or contracts.
+
+#### Binaries
+
+_Binary crates_ are programs which you can compile to an ACIR circuit which you can then create proofs against. Each must have a function called `main` that defines the ACIR circuit which is to be proved.
+
+#### Libraries
+
+_Library crates_ don't have a `main` function and they don't compile down to ACIR. Instead they define functionality intended to be shared with multiple projects, and eventually included in a binary crate.
+
+#### Contracts
+
+Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against.
They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/src/contracts).
+
+### Crate Root
+
+Every crate has a root, which is the source file that the compiler starts from; this is also known as the root module. The Noir compiler does not enforce any conditions on the name of the file which is the crate root; however, if you are compiling via Nargo, the crate root must be called `lib.nr` or `main.nr` for library or binary crates respectively.
+
+## Packages
+
+A Nargo _package_ is a collection of one or more crates that provides a set of functionality. A package must include a Nargo.toml file.
+
+A package _must_ contain either a library or a binary crate, but not both.
+
+### Differences from Cargo Packages
+
+One notable difference between Rust's Cargo and Noir's Nargo is that while Cargo allows a package to contain an unlimited number of binary crates and a single library crate, Nargo currently only allows a package to contain a single crate.
+
+In future this restriction may be lifted to allow a Nargo package to contain both a binary and library crate or multiple binary crates.
diff --git a/docs/versioned_docs/version-v../noir/modules_packages_crates/dependencies.md b/docs/versioned_docs/version-v../noir/modules_packages_crates/dependencies.md
new file mode 100644
index 00000000000..57f0f9fd420
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/modules_packages_crates/dependencies.md
@@ -0,0 +1,124 @@
+---
+title: Dependencies
+description:
+  Learn how to specify and manage dependencies in Nargo, allowing you to upload packages to GitHub
+  and use them easily in your project.
+keywords: [Nargo, dependencies, GitHub, package management, versioning]
+sidebar_position: 1
+---
+
+Nargo allows you to upload packages to GitHub and use them as dependencies.
+
+## Specifying a dependency
+
+Specifying a dependency requires a tag pointing to a specific commit and the git URL of the repository containing
+the package.
+
+Currently, there are no requirements on the tag contents. If requirements are added, they would follow
+semver 2.0 guidelines.
+
+> Note: Without a `tag`, there would be no versioning and dependencies would change each time you
+> compile your project.
+
+For example, to add the [ecrecover-noir library](https://github.com/colinnielsen/ecrecover-noir) to your project, add it to `Nargo.toml`:
+
+```toml
+# Nargo.toml
+
+[dependencies]
+ecrecover = {tag = "v0.8.0", git = "https://github.com/colinnielsen/ecrecover-noir"}
+```
+
+If the package is in a subdirectory of the git repository, you can specify the directory, for example:
+
+```toml
+# Nargo.toml
+
+[dependencies]
+easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "yarn-project/noir-contracts/src/contracts/easy_private_token_contract"}
+```
+
+## Specifying a local dependency
+
+You can also specify dependencies that are local to your machine.
+ +For example, this file structure has a library and binary crate + +```tree +├── binary_crate +│   ├── Nargo.toml +│   └── src +│   └── main.nr +└── liba + ├── Nargo.toml + └── src + └── lib.nr +``` + +Inside of the binary crate, you can specify: + +```toml +# Nargo.toml + +[dependencies] +libA = { path = "../liba" } +``` + +## Importing dependencies + +You can import a dependency to a Noir file using the following syntax. For example, to import the +ecrecover-noir library and local liba referenced above: + +```rust +use dep::ecrecover; +use dep::libA; +``` + +You can also import only the specific parts of dependency that you want to use, like so: + +```rust +use dep::std::hash::sha256; +use dep::std::scalar_mul::fixed_base_embedded_curve; +``` + +Lastly, as demonstrated in the +[elliptic curve example](../standard_library/cryptographic_primitives/ec_primitives#examples), you +can import multiple items in the same line by enclosing them in curly braces: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; +``` + +We don't have a way to consume libraries from inside a [workspace](./workspaces) as external dependencies right now. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +## Dependencies of Dependencies + +Note that when you import a dependency, you also get access to all of the dependencies of that package. + +For example, the [phy_vector](https://github.com/resurgencelabs/phy_vector) library imports an [fraction](https://github.com/resurgencelabs/fraction) library. If you're importing the phy_vector library, then you can access the functions in fractions library like so: + +```rust +use dep::phy_vector; + +fn main(x : Field, y : pub Field) { + //... + let f = phy_vector::fraction::toFraction(true, 2, 1); + //... +} +``` + +## Available Libraries + +Noir does not currently have an official package manager. You can find a list of available Noir libraries in the [awesome-noir repo here](https://github.com/noir-lang/awesome-noir#libraries). 
+ +Some libraries that are available today include: + +- [Standard Library](https://github.com/noir-lang/noir/tree/master/noir_stdlib) - the Noir Standard Library +- [Ethereum Storage Proof Verification](https://github.com/aragonzkresearch/noir-trie-proofs) - a library that contains the primitives necessary for RLP decoding (in the form of look-up table construction) and Ethereum state and storage proof verification (or verification of any trie proof involving 32-byte long keys) +- [BigInt](https://github.com/shuklaayush/noir-bigint) - a library that provides a custom BigUint56 data type, allowing for computations on large unsigned integers +- [ECrecover](https://github.com/colinnielsen/ecrecover-noir/tree/main) - a library to verify an ECDSA signature and return the source Ethereum address +- [Sparse Merkle Tree Verifier](https://github.com/vocdoni/smtverifier-noir/tree/main) - a library for verification of sparse Merkle trees +- [Signed Int](https://github.com/resurgencelabs/signed_int) - a library for accessing a custom Signed Integer data type, allowing access to negative numbers on Noir +- [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, allowing results that aren't whole numbers diff --git a/docs/versioned_docs/version-v../noir/modules_packages_crates/modules.md b/docs/versioned_docs/version-v../noir/modules_packages_crates/modules.md new file mode 100644 index 00000000000..f9f15aee8be --- /dev/null +++ b/docs/versioned_docs/version-v../noir/modules_packages_crates/modules.md @@ -0,0 +1,105 @@ +--- +title: Modules +description: + Learn how to organize your files using modules in Noir, following the same convention as Rust's + module system. Examples included. +keywords: [Noir, Rust, modules, organizing files, sub-modules] +sidebar_position: 2 +--- + +Noir's module system follows the same convention as the _newer_ version of Rust's module system. + +## Purpose of Modules + +Modules are used to organise files. Without modules all of your code would need to live in a single +file. In Noir, the compiler does not automatically scan all of your files to detect modules. This +must be done explicitly by the developer. + +## Examples + +### Importing a module in the crate root + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::hello_world(); +} +``` + +Filename : `src/foo.nr` + +```rust +fn from_foo() {} +``` + +In the above snippet, the crate root is the `src/main.nr` file. The compiler sees the module +declaration `mod foo` which prompts it to look for a foo.nr file. + +Visually this module hierarchy looks like the following : + +``` +crate + ├── main + │ + └── foo + └── from_foo + +``` + +### Importing a module throughout the tree + +All modules are accessible from the `crate::` namespace. + +``` +crate + ├── bar + ├── foo + └── main + +``` + +In the above snippet, if `bar` would like to use functions in `foo`, it can do so by `use crate::foo::function_name`. + +### Sub-modules + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::from_foo(); +} +``` + +Filename : `src/foo.nr` + +```rust +mod bar; +fn from_foo() {} +``` + +Filename : `src/foo/bar.nr` + +```rust +fn from_bar() {} +``` + +In the above snippet, we have added an extra module to the module tree; `bar`. `bar` is a submodule +of `foo` hence we declare bar in `foo.nr` with `mod bar`. 
Since `foo` is not the crate root, the +compiler looks for the file associated with the `bar` module in `src/foo/bar.nr` + +Visually the module hierarchy looks as follows: + +``` +crate + ├── main + │ + └── foo + ├── from_foo + └── bar + └── from_bar +``` diff --git a/docs/versioned_docs/version-v../noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v../noir/modules_packages_crates/workspaces.md new file mode 100644 index 00000000000..67a1dafa372 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/modules_packages_crates/workspaces.md @@ -0,0 +1,40 @@ +--- +title: Workspaces +sidebar_position: 3 +--- + +Workspaces are a feature of nargo that allow you to manage multiple related Noir packages in a single repository. A workspace is essentially a group of related projects that share common build output directories and configurations. + +Each Noir project (with it's own Nargo.toml file) can be thought of as a package. Each package is expected to contain exactly one "named circuit", being the "name" defined in Nargo.toml with the program logic defined in `./src/main.nr`. + +For a project with the following structure: + +```tree +├── crates +│   ├── a +│   │   ├── Nargo.toml +│   │   └── src +│   │   └── main.nr +│   └── b +│   ├── Nargo.toml +│   └── src +│   └── main.nr +├── Nargo.toml +└── Prover.toml +``` + +You can define a workspace in Nargo.toml like so: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +`members` indicates which packages are included in the workspace. As such, all member packages of a workspace will be processed when the `--workspace` flag is used with various commands or if a `default-member` is not specified. + +`default-member` indicates which package various commands process by default. + +Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. diff --git a/docs/versioned_docs/version-v../noir/standard_library/_category_.json b/docs/versioned_docs/version-v../noir/standard_library/_category_.json new file mode 100644 index 00000000000..af04c0933fd --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Standard Library", + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../noir/standard_library/black_box_fns.md b/docs/versioned_docs/version-v../noir/standard_library/black_box_fns.md new file mode 100644 index 00000000000..1dfabfe8f22 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/black_box_fns.md @@ -0,0 +1,46 @@ +--- +title: Black Box Functions +description: Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. +keywords: [noir, black box functions] +--- + +Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. This makes certain zk-snark unfriendly computations cheaper than if they were implemented in Noir. + +:::warning + +It is likely that not all backends will support a particular black box function. + +::: + +Because it is not guaranteed that all backends will support black box functions, it is possible that certain Noir programs won't compile against a particular backend if they use an unsupported black box function. 
It is possible to fallback to less efficient implementations written in Noir/ACIR in some cases. + +Black box functions are specified with the `#[foreign(black_box_fn)]` attribute. For example, the SHA256 function in the Noir [source code](https://github.com/noir-lang/noir/blob/v0.5.1/noir_stdlib/src/hash.nr) looks like: + +```rust +#[foreign(sha256)] +fn sha256(_input : [u8; N]) -> [u8; 32] {} +``` + +## Function list + +Here is a list of the current black box functions that are supported by UltraPlonk: + +- AES +- [SHA256](./cryptographic_primitives/hashes#sha256) +- [Schnorr signature verification](./cryptographic_primitives/schnorr) +- [Blake2s](./cryptographic_primitives/hashes#blake2s) +- [Pedersen Hash](./cryptographic_primitives/hashes#pedersen_hash) +- [Pedersen Commitment](./cryptographic_primitives/hashes#pedersen_commitment) +- [HashToField128Security](./cryptographic_primitives/hashes#hash_to_field) +- [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification) +- [Fixed base scalar multiplication](./cryptographic_primitives/scalar) +- [Compute merkle root](./merkle_trees#compute_merkle_root) +- AND +- XOR +- RANGE +- [Keccak256](./cryptographic_primitives/hashes#keccak256) +- [Recursive proof verification](./recursion) + +Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. To ensure compatibility across backends, the ACVM has fallback implementations of `AND`, `XOR` and `RANGE` defined in its standard library which it can seamlessly fallback to if the backend doesn't support them. + +You can view the black box functions defined in the ACVM code [here](https://github.com/noir-lang/acvm/blob/acir-v0.12.0/acir/src/circuit/black_box_functions.rs). diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/_category_.json b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ec_primitives.md b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ec_primitives.md new file mode 100644 index 00000000000..8d573adb3be --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ec_primitives.md @@ -0,0 +1,102 @@ +--- +title: Elliptic Curve Primitives +keywords: [cryptographic primitives, Noir project] +sidebar_position: 4 +--- + +Data structures and methods on them that allow you to carry out computations involving elliptic +curves over the (mathematical) field corresponding to `Field`. For the field currently at our +disposal, applications would involve a curve embedded in BN254, e.g. the +[Baby Jubjub curve](https://eips.ethereum.org/EIPS/eip-2494). + +## Data structures + +### Elliptic curve configurations + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Curve`), i.e. 
the specific elliptic +curve you want to use, which would be specified using any one of the methods +`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::new` which take the coefficients in the +defining equation together with a generator point as parameters. You can find more detail in the +comments in +[`noir_stdlib/src/ec.nr`](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr), but +the gist of it is that the elliptic curves of interest are usually expressed in one of the standard +forms implemented here (Twisted Edwards, Montgomery and Short Weierstraß), and in addition to that, +you could choose to use `affine` coordinates (Cartesian coordinates - the usual (x,y) - possibly +together with a point at infinity) or `curvegroup` coordinates (some form of projective coordinates +requiring more coordinates but allowing for more efficient implementations of elliptic curve +operations). Conversions between all of these forms are provided, and under the hood these +conversions are done whenever an operation is more efficient in a different representation (or a +mixed coordinate representation is employed). + +### Points + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Point`), i.e. points lying on the +elliptic curve. For a curve configuration `c` and a point `p`, it may be checked that `p` +does indeed lie on `c` by calling `c.contains(p1)`. + +## Methods + +(given a choice of curve representation, e.g. use `std::ec::tecurve::affine::Curve` and use +`std::ec::tecurve::affine::Point`) + +- The **zero element** is given by `Point::zero()`, and we can verify whether a point `p: Point` is + zero by calling `p.is_zero()`. +- **Equality**: Points `p1: Point` and `p2: Point` may be checked for equality by calling + `p1.eq(p2)`. +- **Addition**: For `c: Curve` and points `p1: Point` and `p2: Point` on the curve, adding these two + points is accomplished by calling `c.add(p1,p2)`. +- **Negation**: For a point `p: Point`, `p.negate()` is its negation. +- **Subtraction**: For `c` and `p1`, `p2` as above, subtracting `p2` from `p1` is accomplished by + calling `c.subtract(p1,p2)`. +- **Scalar multiplication**: For `c` as above, `p: Point` a point on the curve and `n: Field`, + scalar multiplication is given by `c.mul(n,p)`. If instead `n :: [u1; N]`, i.e. `n` is a bit + array, the `bit_mul` method may be used instead: `c.bit_mul(n,p)` +- **Multi-scalar multiplication**: For `c` as above and arrays `n: [Field; N]` and `p: [Point; N]`, + multi-scalar multiplication is given by `c.msm(n,p)`. +- **Coordinate representation conversions**: The `into_group` method converts a point or curve + configuration in the affine representation to one in the CurveGroup representation, and + `into_affine` goes in the other direction. +- **Curve representation conversions**: `tecurve` and `montcurve` curves and points are equivalent + and may be converted between one another by calling `into_montcurve` or `into_tecurve` on their + configurations or points. `swcurve` is more general and a curve c of one of the other two types + may be converted to this representation by calling `c.into_swcurve()`, whereas a point `p` lying + on the curve given by `c` may be mapped to its corresponding `swcurve` point by calling + `c.map_into_swcurve(p)`. +- **Map-to-curve methods**: The Elligator 2 method of mapping a field element `n: Field` into a + `tecurve` or `montcurve` with configuration `c` may be called as `c.elligator2_map(n)`. 
For all of + the curve configurations, the SWU map-to-curve method may be called as `c.swu_map(z,n)`, where + `z: Field` depends on `Field` and `c` and must be chosen by the user (the conditions it needs to + satisfy are specified in the comments + [here](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr)). + +## Examples + +The +[ec_baby_jubjub test](https://github.com/noir-lang/noir/blob/master/crates/nargo_cli/tests/test_data/ec_baby_jubjub/src/main.nr) +illustrates all of the above primitives on various forms of the Baby Jubjub curve. A couple of more +interesting examples in Noir would be: + +Public-key cryptography: Given an elliptic curve and a 'base point' on it, determine the public key +from the private key. This is a matter of using scalar multiplication. In the case of Baby Jubjub, +for example, this code would do: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; + +fn bjj_pub_key(priv_key: Field) -> Point +{ + + let bjj = Curve::new(168700, 168696, G::new(995203441582195749578291179787384436505546430278305826713579947235728471134,5472060717959818805561601436314318772137091100104008585924551046643952123905)); + + let base_pt = Point::new(5299619240641551281634865583518297030282874472190772894086521144482721001553, 16950150798460657717958625567821834550301663161624707787222815936182638968203); + + bjj.mul(priv_key,base_pt) +} +``` + +This would come in handy in a Merkle proof. + +- EdDSA signature verification: This is a matter of combining these primitives with a suitable hash + function. See + [feat(stdlib): EdDSA sig verification noir#1136](https://github.com/noir-lang/noir/pull/1136) for + the case of Baby Jubjub and the Poseidon hash function. diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx new file mode 100644 index 00000000000..1376c51dfde --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx @@ -0,0 +1,46 @@ +--- +title: ECDSA Signature Verification +description: Learn about the cryptographic primitives regarding ECDSA over the secp256k1 and secp256r1 curves +keywords: [cryptographic primitives, Noir project, ecdsa, secp256k1, secp256r1, signatures] +sidebar_position: 3 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +Noir supports ECDSA signatures verification over the secp256k1 and secp256r1 curves. 
+ +## ecdsa_secp256k1::verify_signature + +Verifier for ECDSA Secp256k1 signatures + +```rust +fn verify_signature(_public_key_x : [u8; 32], _public_key_y : [u8; 32], _signature: [u8; 64], _message: [u8]) -> bool +``` + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures + +```rust +fn verify_signature(_public_key_x : [u8; 32], _public_key_y : [u8; 32], _signature: [u8; 64], _message: [u8]) -> bool +``` + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/eddsa.mdx b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/eddsa.mdx new file mode 100644 index 00000000000..a9c10da6c06 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/eddsa.mdx @@ -0,0 +1,18 @@ +--- +title: EdDSA Verification +description: Learn about the cryptographic primitives regarding EdDSA +keywords: [cryptographic primitives, Noir project, eddsa, signatures] +sidebar_position: 5 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## eddsa::eddsa_poseidon_verify + +Verifier for EdDSA signatures + +```rust +fn eddsa_poseidon_verify(public_key_x : Field, public_key_y : Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, message: Field) -> bool +``` + + diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/hashes.mdx new file mode 100644 index 00000000000..9250cb4a0c0 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/hashes.mdx @@ -0,0 +1,168 @@ +--- +title: Hash methods +description: + Learn about the cryptographic primitives ready to use for any Noir project, including sha256, + blake2s, pedersen, mimc_bn254 and mimc +keywords: + [cryptographic primitives, Noir project, sha256, blake2s, pedersen, mimc_bn254, mimc, hash] +sidebar_position: 0 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## sha256 + +Given an array of bytes, returns the resulting sha256 hash. + +```rust +fn sha256(_input : [u8]) -> [u8; 32] +``` + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::sha256(x); +} +``` + + + +## blake2s + +Given an array of bytes, returns an array with the Blake2 hash + +```rust +fn blake2s(_input : [u8]) -> [u8; 32] +``` + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake2s(x); +} +``` + + + +## pedersen_hash + +Given an array of Fields, returns the Pedersen hash. + +```rust +fn pedersen_hash(_input : [Field]) -> Field +``` + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::pedersen_hash(x); +} +``` + + + + + +## pedersen_commitment + +Given an array of Fields, returns the Pedersen commitment. 
+ +```rust +fn pedersen_commitment(_input : [Field]) -> [Field; 2] +``` + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let commitment = std::hash::pedersen_commitment(x); +} +``` + + + +## keccak256 + +Given an array of bytes (`u8`), returns the resulting keccak hash as an array of 32 bytes +(`[u8; 32]`). Specify a message_size to hash only the first `message_size` bytes +of the input. + +```rust +fn keccak256(_input : [u8; N], _message_size: u32) -> [u8; 32] +``` + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let message_size = 4; + let hash = std::hash::keccak256(x, message_size); +} +``` + + + +## poseidon + +Given an array of Fields, returns a new Field with the Poseidon Hash. Mind that you need to specify +how many inputs are there to your Poseidon function. + +```rust +// example for hash_1, hash_2 accepts an array of length 2, etc +fn hash_1(input: [Field; 1]) -> Field +``` + +example: + +```rust +fn main() +{ + let hash_2 = std::hash::poseidon::bn254::hash_2([1, 2]); + assert(hash2 == 0x115cc0f5e7d690413df64c6b9662e9cf2a3617f2743245519e19607a4417189a); +} +``` + +## mimc_bn254 and mimc + +`mimc_bn254` is `mimc`, but with hardcoded parameters for the BN254 curve. You can use it by +providing an array of Fields, and it returns a Field with the hash. You can use the `mimc` method if +you're willing to input your own constants: + +```rust +fn mimc(x: Field, k: Field, constants: [Field; N], exp : Field) -> Field +``` + +otherwise, use the `mimc_bn254` method: + +```rust +fn mimc_bn254(array: [Field; N]) -> Field +``` + +example: + +```rust + +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::mimc::mimc_bn254(x); +} +``` + +## hash_to_field + +```rust +fn hash_to_field(_input : [Field; N]) -> Field {} +``` + +Calculates the `blake2s` hash of the inputs and returns the hash modulo the field modulus to return +a value which can be represented as a `Field`. + + diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/index.md b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/index.md new file mode 100644 index 00000000000..650f30165d5 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/index.md @@ -0,0 +1,14 @@ +--- +title: Cryptographic Primitives +description: + Learn about the cryptographic primitives ready to use for any Noir project +keywords: + [ + cryptographic primitives, + Noir project, + ] +--- + +The Noir team is progressively adding new cryptographic primitives to the standard library. Reach out for news or if you would be interested in adding more of these calculations in Noir. + +Some methods are available thanks to the Aztec backend, not being performed using Noir. When using other backends, these methods may or may not be supplied. 
diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/scalar.mdx b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/scalar.mdx new file mode 100644 index 00000000000..1e686303c18 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/scalar.mdx @@ -0,0 +1,28 @@ +--- +title: Scalar multiplication +description: See how you can perform scalar multiplications over a fixed base in Noir +keywords: [cryptographic primitives, Noir project, scalar multiplication] +sidebar_position: 1 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## scalar_mul::fixed_base_embedded_curve + +Performs scalar multiplication over the embedded curve whose coordinates are defined by the +configured noir field. For the BN254 scalar field, this is BabyJubJub or Grumpkin. + +```rust +fn fixed_base_embedded_curve(_input : Field) -> [Field; 2] +``` + +example + +```rust +fn main(x : Field) { + let scal = std::scalar_mul::fixed_base_embedded_curve(x); + std::println(scal); +} +``` + + diff --git a/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/schnorr.mdx b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/schnorr.mdx new file mode 100644 index 00000000000..7a2c9c20226 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/cryptographic_primitives/schnorr.mdx @@ -0,0 +1,38 @@ +--- +title: Schnorr Signatures +description: Learn how you can verify Schnorr signatures using Noir +keywords: [cryptographic primitives, Noir project, schnorr, signatures] +sidebar_position: 2 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## schnorr::verify_signature + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin). + +```rust +fn verify_signature(_public_key_x: Field, _public_key_y: Field, _signature: [u8; 64], _message: [u8]) -> bool +``` + +where `_signature` can be generated like so using the npm package +[@noir-lang/barretenberg](https://www.npmjs.com/package/@noir-lang/barretenberg) + +```js +const { BarretenbergWasm } = require('@noir-lang/barretenberg/dest/wasm'); +const { Schnorr } = require('@noir-lang/barretenberg/dest/crypto/schnorr'); + +... + +const barretenberg = await BarretenbergWasm.new(); +const schnorr = new Schnorr(barretenberg); +const pubKey = schnorr.computePublicKey(privateKey); +const message = ... +const signature = Array.from( + schnorr.constructSignature(hash, privateKey).toBuffer() +); + +... +``` + + diff --git a/docs/versioned_docs/version-v../noir/standard_library/logging.md b/docs/versioned_docs/version-v../noir/standard_library/logging.md new file mode 100644 index 00000000000..16daf922e15 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/logging.md @@ -0,0 +1,77 @@ +--- +title: Logging +description: + Learn how to use the println statement for debugging in Noir with this tutorial. Understand the + basics of logging in Noir and how to implement it in your code. +keywords: + [ + noir logging, + println statement, + print statement, + debugging in noir, + noir std library, + logging tutorial, + basic logging in noir, + noir logging implementation, + noir debugging techniques, + rust, + ] +--- + +The standard library provides two familiar statements you can use: `println` and `print`. Despite being a limited implementation of rust's `println!` and `print!` macros, these constructs can be useful for debugging. 
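+
+For example, the simplest possible use is printing a single value from `main` (a minimal sketch):
+
+```rust
+use dep::std;
+
+fn main(x : Field) {
+    // prints the value of x during execution
+    std::println(x);
+}
+```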
+ +You can print the output of both statements in your Noir code by using the `nargo execute` command or the `--show-output` flag when using `nargo test` (provided there are print statements in your tests). + +It is recommended to use `nargo execute` if you want to debug failing constrains with `println` or `print` statements. This is due to every input in a test being a constant rather than a witness, so we issue an error during compilation while we only print during execution (which comes after compilation). Neither `println`, nor `print` are callable for failed constraints caught at compile time. + +Both `print` and `println` are generic functions which can work on integers, fields, strings, and even structs or expressions. Note however, that slices are currently unsupported. For example: + +```rust +use dep::std; + +struct Person { + age : Field, + height : Field, +} + +fn main(age : Field, height : Field) { + let person = Person { age : age, height : height }; + std::println(person); + std::println(age + height); + std::println("Hello world!"); +} +``` + +You can print different types in the same statement (including strings) with a type called `fmtstr`. It can be specified in the same way as a normal string, just prepended with an "f" character: + +```rust + let fmt_str = f"i: {i}, j: {j}"; + std::println(fmt_str); + + let s = myStruct { y: x, x: y }; + std::println(s); + + std::println(f"i: {i}, s: {s}"); + + std::println(x); + std::println([x, y]); + + let foo = fooStruct { my_struct: s, foo: 15 }; + std::println(f"s: {s}, foo: {foo}"); + + std::println(15); // prints 0x0f, implicit Field + std::println(-1 as u8); // prints 255 + std::println(-1 as i8); // prints -1 +``` + +Examples shown above are interchangeable between the two `print` statements: + +```rust +let person = Person { age : age, height : height }; + +std::println(person); +std::print(person); + +std::println("Hello world!"); // Prints with a newline at the end of the input +std::print("Hello world!"); // Prints the input and keeps cursor on the same line +``` diff --git a/docs/versioned_docs/version-v../noir/standard_library/merkle_trees.md b/docs/versioned_docs/version-v../noir/standard_library/merkle_trees.md new file mode 100644 index 00000000000..5b45617812a --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/merkle_trees.md @@ -0,0 +1,58 @@ +--- +title: Merkle Trees +description: Learn about Merkle Trees in Noir with this tutorial. Explore the basics of computing a merkle root using a proof, with examples. +keywords: + [ + Merkle trees in Noir, + Noir programming language, + check membership, + computing root from leaf, + Noir Merkle tree implementation, + Merkle tree tutorial, + Merkle tree code examples, + Noir libraries, + pedersen hash., + ] +--- + +## compute_merkle_root + +Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](./cryptographic_primitives/hashes.mdx#pedersen_hash). 
+ +```rust +fn compute_merkle_root(leaf : Field, index : Field, hash_path: [Field]) -> Field +``` + +example: + +```rust +/** + // these values are for this example only + index = "0" + priv_key = "0x000000000000000000000000000000000000000000000000000000616c696365" + secret = "0x1929ea3ab8d9106a899386883d9428f8256cfedb3c4f6b66bf4aa4d28a79988f" + note_hash_path = [ + "0x1e61bdae0f027b1b2159e1f9d3f8d00fa668a952dddd822fda80dc745d6f65cc", + "0x0e4223f3925f98934393c74975142bd73079ab0621f4ee133cee050a3c194f1a", + "0x2fd7bb412155bf8693a3bd2a3e7581a679c95c68a052f835dddca85fa1569a40" + ] + */ +fn main(index: Field, priv_key: Field, secret: Field, note_hash_path: [Field; 3]) { + + let pubkey = std::scalar_mul::fixed_base_embedded_curve(priv_key); + let pubkey_x = pubkey[0]; + let pubkey_y = pubkey[1]; + let note_commitment = std::hash::pedersen([pubkey_x, pubkey_y, secret]); + + let root = std::merkle::compute_merkle_root(note_commitment[0], index, note_hash_path); + std::println(root); +} +``` + +To check merkle tree membership: + +1. Include a merkle root as a program input. +2. Compute the merkle root of a given leaf, index and hash path. +3. Assert the merkle roots are equal. + +For more info about merkle trees, see the Wikipedia [page](https://en.wikipedia.org/wiki/Merkle_tree). diff --git a/docs/versioned_docs/version-v../noir/standard_library/options.md b/docs/versioned_docs/version-v../noir/standard_library/options.md new file mode 100644 index 00000000000..3d3139fb98b --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/options.md @@ -0,0 +1,99 @@ +--- +title: Option Type +--- + +The `Option` type is a way to express that a value might be present (`Some(T))` or absent (`None`). It's a safer way to handle potential absence of values, compared to using nulls in many other languages. + +```rust +struct Option { + None, + Some(T), +} +``` + +You can import the Option type into your Noir program like so: + +```rust +use dep::std::option::Option; + +fn main() { + let none = Option::none(); + let some = Option::some(3); +} +``` + +See [this test](https://github.com/noir-lang/noir/blob/5cbfb9c4a06c8865c98ff2b594464b037d821a5c/crates/nargo_cli/tests/test_data/option/src/main.nr) for a more comprehensive set of examples of each of the methods described below. + +## Methods + +### none + +Constructs a none value. + +### some + +Constructs a some wrapper around a given value. + +### is_none + +Returns true if the Option is None. + +### is_some + +Returns true of the Option is Some. + +### unwrap + +Asserts `self.is_some()` and returns the wrapped value. + +### unwrap_unchecked + +Returns the inner value without asserting `self.is_some()`. This method can be useful within an if condition when we already know that `option.is_some()`. If the option is None, there is no guarantee what value will be returned, only that it will be of type T for an `Option`. + +### unwrap_or + +Returns the wrapped value if `self.is_some()`. Otherwise, returns the given default value. + +### unwrap_or_else + +Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return a default value. + +### map + +If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`. + +### map_or + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns the given default value. + +### map_or_else + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns `default()`. + +### and + +Returns None if self is None. Otherwise, this returns `other`. 
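+
+As a quick illustration of `and` (the linked test file above covers every method more thoroughly), a minimal sketch might look like:
+
+```rust
+use dep::std::option::Option;
+
+fn main() {
+    let none = Option::none();
+    let some = Option::some(3);
+
+    // None.and(_) is always None
+    assert(none.and(some).is_none());
+
+    // Some(_).and(other) simply returns `other`
+    assert(some.and(Option::some(5)).unwrap() == 5);
+}
+```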
+ +### and_then + +If self is None, this returns None. Otherwise, this calls the given function with the Some value contained within self, and returns the result of that call. In some languages this function is called `flat_map` or `bind`. + +### or + +If self is Some, return self. Otherwise, return `other`. + +### or_else + +If self is Some, return self. Otherwise, return `default()`. + +### xor + +If only one of the two Options is Some, return that option. Otherwise, if both options are Some or both are None, None is returned. + +### filter + +Returns `Some(x)` if self is `Some(x)` and `predicate(x)` is true. Otherwise, this returns `None`. + +### flatten + +Flattens an `Option>` into a `Option`. This returns `None` if the outer Option is None. Otherwise, this returns the inner Option. diff --git a/docs/versioned_docs/version-v../noir/standard_library/recursion.md b/docs/versioned_docs/version-v../noir/standard_library/recursion.md new file mode 100644 index 00000000000..67962082a8f --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/recursion.md @@ -0,0 +1,90 @@ +--- +title: Recursive Proofs +description: Learn about how to write recursive proofs in Noir. +keywords: [recursion, recursive proofs, verification_key, aggregation object, verify_proof] +--- + +Noir supports recursively verifying proofs, meaning you verify the proof of a Noir program in another Noir program. This enables creating proofs of arbitrary size by doing step-wise verification of smaller components of a large proof. + +The `verify_proof` function takes a verification key, proof and public inputs for a zk program, as well as a key hash and an input aggregation object. The key hash is used to check the validity of the verification key and the input aggregation object is required by some proving systems. The `verify_proof` function returns an output aggregation object that can then be fed into future iterations of the proof verification if required. + +```rust +#[foreign(verify_proof)] +fn verify_proof(_verification_key : [Field], _proof : [Field], _public_input : Field, _key_hash : Field, _input_aggregation_object : [Field]) -> [Field] {} +``` + +:::info + +This is a black box function. Read [this section](./black_box_fns) to learn more about black box functions in Noir. + +::: + +## Example usage + +```rust +use dep::std; + +fn main( + verification_key : [Field; 114], + proof : [Field; 94], + public_inputs : [Field; 1], + key_hash : Field, + input_aggregation_object : [Field; 16], + proof_b : [Field; 94], +) -> pub [Field; 16] { + let output_aggregation_object_a = std::verify_proof( + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash, + input_aggregation_object + ); + + let output_aggregation_object = std::verify_proof( + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), + key_hash, + output_aggregation_object_a + ); + + let mut output = [0; 16]; + for i in 0..16 { + output[i] = output_aggregation_object[i]; + } + output +} +``` + +## Parameters + +### `verification_key` + +The verification key for the zk program that is being verified. + +### `proof` + +The proof for the zk program that is being verified. + +### `public_inputs` + +These represent the public inputs of the proof we are verifying. They should be checked against in the circuit after construction of a new aggregation state. + +### `key_hash` + +A key hash is used to check the validity of the verification key. 
The circuit implementing this opcode can use this hash to ensure that the key provided to the circuit matches the key produced by the circuit creator. + +### `input_aggregation_object` + +An aggregation object is blob of data that the top-level verifier must run some proof system specific algorithm on to complete verification. The size is proof system specific and will be set by the backend integrating this opcode. The input aggregation object is only not `None` when we are verifying a previous recursive aggregation in the current circuit. If this is the first recursive aggregation there is no input aggregation object. It is left to the backend to determine how to handle when there is no input aggregation object. + +## Return value + +### `output_aggregation_object` + +This is the result of a recursive aggregation and is what will be fed into the next verifier. +The next verifier can either perform a final verification (returning true or false) or perform another recursive aggregation where this output aggregation object will be the input aggregation object of the next recursive aggregation. + +## Example + +You can see an example of how to do recursive proofs in [this example recursion demo repo](https://github.com/noir-lang/noir-examples/tree/master/recursion). diff --git a/docs/versioned_docs/version-v../noir/standard_library/zeroed.md b/docs/versioned_docs/version-v../noir/standard_library/zeroed.md new file mode 100644 index 00000000000..97dab02dac2 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/standard_library/zeroed.md @@ -0,0 +1,25 @@ +--- +title: Zeroed Function +description: + The zeroed function returns a zeroed value of any type. +keywords: + [ + zeroed + ] +--- + +Implements `fn zeroed() -> T` to return a zeroed value of any type. This function is generally unsafe to use as the zeroed bit pattern is not guaranteed to be valid for all types. It can however, be useful in cases when the value is guaranteed not to be used such as in a BoundedVec library implementing a growable vector, up to a certain length, backed by an array. The array can be initialized with zeroed values which are guaranteed to be inaccessible until the vector is pushed to. Similarly, enumerations in noir can be implemented using this method by providing zeroed values for the unused variants. + +You can access the function at `std::unsafe::zeroed`. + +This function currently supports the following types: + +- Field +- Bool +- Uint +- Array +- String +- Tuple +- Function + +Using it on other types could result in unexpected behavior. diff --git a/docs/versioned_docs/version-v../noir/syntax/_category_.json b/docs/versioned_docs/version-v../noir/syntax/_category_.json new file mode 100644 index 00000000000..666b691ae91 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Syntax", + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../noir/syntax/assert.md b/docs/versioned_docs/version-v../noir/syntax/assert.md new file mode 100644 index 00000000000..c5f9aff139c --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/assert.md @@ -0,0 +1,27 @@ +--- +title: Assert Function +description: + Learn about the assert function in Noir, which can be used to explicitly constrain the predicate or + comparison expression that follows to be true, and what happens if the expression is false at + runtime. 
+keywords: [Noir programming language, assert statement, predicate expression, comparison expression] +sidebar_position: 4 +--- + +Noir includes a special `assert` function which will explicitly constrain the predicate/comparison +expression that follows to be true. If this expression is false at runtime, the program will fail to +be proven. Example: + +```rust +fn main(x : Field, y : Field) { + assert(x == y); +} +``` + +You can optionally provide a message to be logged when the assertion fails: + +```rust +assert(x == y, "x and y are not equal"); +``` + +> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. diff --git a/docs/versioned_docs/version-v../noir/syntax/comments.md b/docs/versioned_docs/version-v../noir/syntax/comments.md new file mode 100644 index 00000000000..f76ab49094b --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/comments.md @@ -0,0 +1,33 @@ +--- +title: Comments +description: + Learn how to write comments in Noir programming language. A comment is a line of code that is + ignored by the compiler, but it can be read by programmers. Single-line and multi-line comments + are supported in Noir. +keywords: [Noir programming language, comments, single-line comments, multi-line comments] +sidebar_position: 9 +--- + +A comment is a line in your codebase which the compiler ignores, however it can be read by +programmers. + +Here is a single line comment: + +```rust +// This is a comment and is ignored +``` + +`//` is used to tell the compiler to ignore the rest of the line. + +Noir also supports multi-line block comments. Start a block comment with `/*` and end the block with `*/`. + +Noir does not natively support doc comments. You may be able to use [Rust doc comments](https://doc.rust-lang.org/reference/comments.html) in your code to leverage some Rust documentation build tools with Noir code. + +```rust +/* + This is a block comment describing a complex function. +*/ +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/control_flow.md b/docs/versioned_docs/version-v../noir/syntax/control_flow.md new file mode 100644 index 00000000000..4ce65236db3 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/control_flow.md @@ -0,0 +1,45 @@ +--- +title: Control Flow +description: + Learn how to use loops and if expressions in the Noir programming language. Discover the syntax + and examples for for loops and if-else statements. +keywords: [Noir programming language, loops, for loop, if-else statements, Rust syntax] +sidebar_position: 2 +--- + +## Loops + +Noir has one kind of loop: the `for` loop. `for` loops allow you to repeat a block of code multiple +times. + +The following block of code between the braces is run 10 times. + +```rust +for i in 0..10 { + // do something +}; +``` + +The index for loops is of type `u64`. + +## If Expressions + +Noir supports `if-else` statements. The syntax is most similar to Rust's where it is not required +for the statement's conditional to be surrounded by parentheses. 
+ +```rust +let a = 0; +let mut x: u32 = 0; + +if a == 0 { + if a != 0 { + x = 6; + } else { + x = 2; + } +} else { + x = 5; + assert(x == 5); +} +assert(x == 2); +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_bus.md b/docs/versioned_docs/version-v../noir/syntax/data_bus.md new file mode 100644 index 00000000000..6c7e9b60891 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_bus.md @@ -0,0 +1,21 @@ +--- +title: Data Bus +sidebar_position: 12 +--- +**Disclaimer** this feature is experimental, do not use it! + +The data bus is an optimization that the backend can use to make recursion more efficient. +In order to use it, you must define some inputs of the program entry points (usually the `main()` +function) with the `call_data` modifier, and the return values with the `return_data` modifier. +These modifiers are incompatible with `pub` and `mut` modifiers. + +## Example + +```rust +fn main(mut x: u32, y: call_data u32, z: call_data [u32;4] ) -> return_data u32 { + let a = z[x]; + a+y +} +``` + +As a result, both call_data and return_data will be treated as private inputs and encapsulated into a read-only array each, for the backend to process. diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/_category_.json b/docs/versioned_docs/version-v../noir/syntax/data_types/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/arrays.md b/docs/versioned_docs/version-v../noir/syntax/data_types/arrays.md new file mode 100644 index 00000000000..075d39dadd4 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/arrays.md @@ -0,0 +1,245 @@ +--- +title: Arrays +description: + Dive into the Array data type in Noir. Grasp its methods, practical examples, and best practices for efficiently using Arrays in your Noir code. +keywords: + [ + noir, + array type, + methods, + examples, + indexing, + ] +sidebar_position: 4 +--- + +An array is one way of grouping together values into one compound type. Array types can be inferred +or explicitly specified via the syntax `[; ]`: + +```rust +fn main(x : Field, y : Field) { + let my_arr = [x, y]; + let your_arr: [Field; 2] = [x, y]; +} +``` + +Here, both `my_arr` and `your_arr` are instantiated as an array containing two `Field` elements. + +Array elements can be accessed using indexing: + +```rust +fn main() { + let a = [1, 2, 3, 4, 5]; + + let first = a[0]; + let second = a[1]; +} +``` + +All elements in an array must be of the same type (i.e. homogeneous). That is, an array cannot group +a `Field` value and a `u8` value together for example. + +You can write mutable arrays, like: + +```rust +fn main() { + let mut arr = [1, 2, 3, 4, 5]; + assert(arr[0] == 1); + + arr[0] = 42; + assert(arr[0] == 42); +} +``` + +You can instantiate a new array of a fixed size with the same value repeated for each element. The following example instantiates an array of length 32 where each element is of type Field and has the value 0. + +```rust +let array: [Field; 32] = [0; 32]; +``` + +Like in Rust, arrays in Noir are a fixed size. 
However, if you wish to convert an array to a [slice](./slices), you can just call `as_slice` on your array: + +```rust +let array: [Field; 32] = [0; 32]; +let sl = array.as_slice() +``` + +You can define multidimensional arrays: + +```rust +let array : [[Field; 2]; 2]; +let element = array[0][0]; +``` + +## Types + +You can create arrays of primitive types or structs. There is not yet support for nested arrays +(arrays of arrays) or arrays of structs that contain arrays. + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for arrays: + +### len + +Returns the length of an array + +```rust +fn len(_array: [T; N]) -> comptime Field +``` + +example + +```rust +fn main() { + let array = [42, 42]; + assert(array.len() == 2); +} +``` + +### sort + +Returns a new sorted array. The original array remains untouched. Notice that this function will +only work for arrays of fields or integers, not for any arbitrary type. This is because the sorting +logic it uses internally is optimized specifically for these values. If you need a sort function to +sort any type, you should use the function `sort_via` described below. + +```rust +fn sort(_array: [T; N]) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32]; + let sorted = arr.sort(); + assert(sorted == [32, 42]); +} +``` + +### sort_via + +Sorts the array with a custom comparison function + +```rust +fn sort_via(mut a: [T; N], ordering: fn(T, T) -> bool) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32] + let sorted_ascending = arr.sort_via(|a, b| a < b); + assert(sorted_ascending == [32, 42]); // verifies + + let sorted_descending = arr.sort_via(|a, b| a > b); + assert(sorted_descending == [32, 42]); // does not verify +} +``` + +### map + +Applies a function to each element of the array, returning a new array containing the mapped elements. + +```rust +fn map(f: fn(T) -> U) -> [U; N] +``` + +example + +```rust +let a = [1, 2, 3]; +let b = a.map(|a| a * 2); // b is now [2, 4, 6] +``` + +### fold + +Applies a function to each element of the array, returning the final accumulated value. The first +parameter is the initial value. + +```rust +fn fold(mut accumulator: U, f: fn(U, T) -> U) -> U +``` + +This is a left fold, so the given function will be applied to the accumulator and first element of +the array, then the second, and so on. For a given call the expected result would be equivalent to: + +```rust +let a1 = [1]; +let a2 = [1, 2]; +let a3 = [1, 2, 3]; + +let f = |a, b| a - b; +a1.fold(10, f) //=> f(10, 1) +a2.fold(10, f) //=> f(f(10, 1), 2) +a3.fold(10, f) //=> f(f(f(10, 1), 2), 3) +``` + +example: + +```rust + +fn main() { + let arr = [2, 2, 2, 2, 2]; + let folded = arr.fold(0, |a, b| a + b); + assert(folded == 10); +} + +``` + +### reduce + +Same as fold, but uses the first element as starting element. 
+ +```rust +fn reduce(f: fn(T, T) -> T) -> T +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let reduced = arr.reduce(|a, b| a + b); + assert(reduced == 10); +} +``` + +### all + +Returns true if all the elements satisfy the given predicate + +```rust +fn all(predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let all = arr.all(|a| a == 2); + assert(all); +} +``` + +### any + +Returns true if any of the elements satisfy the given predicate + +```rust +fn any(predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 5]; + let any = arr.any(|a| a == 5); + assert(any); +} + +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/booleans.md b/docs/versioned_docs/version-v../noir/syntax/data_types/booleans.md new file mode 100644 index 00000000000..69826fcd724 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/booleans.md @@ -0,0 +1,31 @@ +--- +title: Booleans +description: + Delve into the Boolean data type in Noir. Understand its methods, practical examples, and best practices for using Booleans in your Noir programs. +keywords: + [ + noir, + boolean type, + methods, + examples, + logical operations, + ] +sidebar_position: 2 +--- + + +The `bool` type in Noir has two possible values: `true` and `false`: + +```rust +fn main() { + let t = true; + let f: bool = false; +} +``` + +> **Note:** When returning a boolean value, it will show up as a value of 1 for `true` and 0 for +> `false` in _Verifier.toml_. + +The boolean type is most commonly used in conditionals like `if` expressions and `assert` +statements. More about conditionals is covered in the [Control Flow](../control_flow) and +[Assert Function](../assert) sections. diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/fields.md b/docs/versioned_docs/version-v../noir/syntax/data_types/fields.md new file mode 100644 index 00000000000..a1c67945d66 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/fields.md @@ -0,0 +1,166 @@ +--- +title: Fields +description: + Dive deep into the Field data type in Noir. Understand its methods, practical examples, and best practices to effectively use Fields in your Noir programs. +keywords: + [ + noir, + field type, + methods, + examples, + best practices, + ] +sidebar_position: 0 +--- + +The field type corresponds to the native field type of the proving backend. + +The size of a Noir field depends on the elliptic curve's finite field for the proving backend +adopted. For example, a field would be a 254-bit integer when paired with the default backend that +spans the Grumpkin curve. + +Fields support integer arithmetic and are often used as the default numeric type in Noir: + +```rust +fn main(x : Field, y : Field) { + let z = x + y; +} +``` + +`x`, `y` and `z` are all private fields in this example. Using the `let` keyword we defined a new +private value `z` constrained to be equal to `x + y`. + +If proving efficiency is of priority, fields should be used as a default for solving problems. +Smaller integer types (e.g. `u64`) incur extra range constraints. + +## Methods + +After declaring a Field, you can use these common methods on it: + +### to_le_bits + +Transforms the field into an array of bits, Little Endian. 
+ +```rust +fn to_le_bits(_x : Field, _bit_size: u32) -> [u1; N] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_le_bits(32); +} +``` + +### to_be_bits + +Transforms the field into an array of bits, Big Endian. + +```rust +fn to_be_bits(_x : Field, _bit_size: u32) -> [u1; N] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_be_bits(32); +} +``` + +### to_le_bytes + +Transforms into an array of bytes, Little Endian + +```rust +fn to_le_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_le_bytes(4); +} +``` + +### to_be_bytes + +Transforms into an array of bytes, Big Endian + +```rust +fn to_be_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_be_bytes(4); +} +``` + +### to_le_radix + +Decomposes into a vector over the specified base, Little Endian + +```rust +fn to_le_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_le_radix(256, 4); +} +``` + +### to_be_radix + +Decomposes into a vector over the specified base, Big Endian + +```rust +fn to_be_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_be_radix(256, 4); +} +``` + +### pow_32 + +Returns the value to the power of the specified exponent + +```rust +fn pow_32(self, exponent: Field) -> Field +``` + +example: + +```rust +fn main() { + let field = 2 + let pow = field.pow_32(4); + assert(pow == 16); +} +``` + +### sgn0 + +Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} is even, otherwise sgn0(x mod p) = 1. + +```rust +fn sgn0(self) -> u1 +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/function_types.md b/docs/versioned_docs/version-v../noir/syntax/data_types/function_types.md new file mode 100644 index 00000000000..61e4076adaf --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/function_types.md @@ -0,0 +1,26 @@ +--- +title: Function types +sidebar_position: 10 +--- + +Noir supports higher-order functions. The syntax for a function type is as follows: + +```rust +fn(arg1_type, arg2_type, ...) -> return_type +``` + +Example: + +```rust +fn assert_returns_100(f: fn() -> Field) { // f takes no args and returns a Field + assert(f() == 100); +} + +fn main() { + assert_returns_100(|| 100); // ok + assert_returns_100(|| 150); // fails +} +``` + +A function type also has an optional capture environment - this is necessary to support closures. +See [Lambdas](@site/docs/noir/syntax/lambdas.md) for more details. diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/index.md b/docs/versioned_docs/version-v../noir/syntax/data_types/index.md new file mode 100644 index 00000000000..52e568e9b7e --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/index.md @@ -0,0 +1,96 @@ +--- +title: Data Types +description: + Get a clear understanding of the two categories of Noir data types - primitive types and compound + types. Learn about their characteristics, differences, and how to use them in your Noir + programming. +keywords: + [ + noir, + data types, + primitive types, + compound types, + private types, + public types, + ] +--- + +Every value in Noir has a type, which determines which operations are valid for it. 
+ +All values in Noir are fundamentally composed of `Field` elements. For a more approachable +developing experience, abstractions are added on top to introduce different data types in Noir. + +Noir has two category of data types: primitive types (e.g. `Field`, integers, `bool`) and compound +types that group primitive types (e.g. arrays, tuples, structs). Each value can either be private or +public. + +## Private & Public Types + +A **private value** is known only to the Prover, while a **public value** is known by both the +Prover and Verifier. Mark values as `private` when the value should only be known to the prover. All +primitive types (including individual fields of compound types) in Noir are private by default, and +can be marked public when certain values are intended to be revealed to the Verifier. + +> **Note:** For public values defined in Noir programs paired with smart contract verifiers, once +> the proofs are verified on-chain the values can be considered known to everyone that has access to +> that blockchain. + +Public data types are treated no differently to private types apart from the fact that their values +will be revealed in proofs generated. Simply changing the value of a public type will not change the +circuit (where the same goes for changing values of private types as well). + +_Private values_ are also referred to as _witnesses_ sometimes. + +> **Note:** The terms private and public when applied to a type (e.g. `pub Field`) have a different +> meaning than when applied to a function (e.g. `pub fn foo() {}`). +> +> The former is a visibility modifier for the Prover to interpret if a value should be made known to +> the Verifier, while the latter is a visibility modifier for the compiler to interpret if a +> function should be made accessible to external Noir programs like in other languages. + +### pub Modifier + +All data types in Noir are private by default. Types are explicitly declared as public using the +`pub` modifier: + +```rust +fn main(x : Field, y : pub Field) -> pub Field { + x + y +} +``` + +In this example, `x` is **private** while `y` and `x + y` (the return value) are **public**. Note +that visibility is handled **per variable**, so it is perfectly valid to have one input that is +private and another that is public. + +> **Note:** Public types can only be declared through parameters on `main`. + +## Type Aliases + +A type alias is a new name for an existing type. Type aliases are declared with the keyword `type`: + +```rust +type Id = u8; + +fn main() { + let id: Id = 1; + let zero: u8 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can also be used with [generics](@site/docs/noir/syntax/generics.md): + +```rust +type Id = Size; + +fn main() { + let id: Id = 1; + let zero: u32 = 0; + assert(zero + 1 == id); +} +``` + +### BigInt + +You can acheive BigInt functionality using the [Noir BigInt](https://github.com/shuklaayush/noir-bigint) library. diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/integers.md b/docs/versioned_docs/version-v../noir/syntax/data_types/integers.md new file mode 100644 index 00000000000..7d1e83cf4e9 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/integers.md @@ -0,0 +1,113 @@ +--- +title: Integers +description: Explore the Integer data type in Noir. Learn about its methods, see real-world examples, and grasp how to efficiently use Integers in your Noir code. 
+keywords: [noir, integer types, methods, examples, arithmetic] +sidebar_position: 1 +--- + +An integer type is a range constrained field type. The Noir frontend supports arbitrarily-sized, both unsigned and signed integer types. + +:::info + +When an integer is defined in Noir without a specific type, it will default to `Field`. + +The one exception is for loop indices which default to `u64` since comparisons on `Field`s are not possible. + +::: + +## Unsigned Integers + +An unsigned integer type is specified first with the letter `u` (indicating its unsigned nature) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: u8 = 1; + let y: u8 = 1; + let z = x + y; + assert (z == 2); +} +``` + +The bit size determines the maximum value the integer type can store. For example, a `u8` variable can store a value in the range of 0 to 255 (i.e. $\\2^{8}-1\\$). + +## Signed Integers + +A signed integer type is specified first with the letter `i` (which stands for integer) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: i8 = -1; + let y: i8 = -1; + let z = x + y; + assert (z == -2); +} +``` + +The bit size determines the maximum and minimum range of value the integer type can store. For example, an `i8` variable can store a value in the range of -128 to 127 (i.e. $\\-2^{7}\\$ to $\\2^{7}-1\\$). + +:::tip + +If you are using the default proving backend with Noir, both even (e.g. _u2_, _i2_) and odd (e.g. _u3_, _i3_) arbitrarily-sized integer types up to 127 bits (i.e. _u127_ and _i127_) are supported. + +::: + +## Overflows + +Computations that exceed the type boundaries will result in overflow errors. This happens with both signed and unsigned integers. For example, attempting to prove: + +```rust +fn main(x: u8, y: u8) { + let z = x + y; +} +``` + +With: + +```toml +x = "255" +y = "1" +``` + +Would result in: + +``` +$ nargo prove +error: Assertion failed: 'attempt to add with overflow' +┌─ ~/src/main.nr:9:13 +│ +│ let z = x + y; +│ ----- +│ += Call stack: + ... +``` + +A similar error would happen with signed integers: + +```rust +fn main() { + let x: i8 = -118; + let y: i8 = -11; + let z = x + y; +} +``` + +### Wrapping methods + +Although integer overflow is expected to error, some use-cases rely on wrapping. For these use-cases, the standard library provides `wrapping` variants of certain common operations: + +```rust +fn wrapping_add(x: T, y: T) -> T; +fn wrapping_sub(x: T, y: T) -> T; +fn wrapping_mul(x: T, y: T) -> T; +``` + +Example of how it is used: + +```rust +use dep::std; + +fn main(x: u8, y: u8) -> pub u8 { + std::wrapping_add(x + y) +} +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/references.md b/docs/versioned_docs/version-v../noir/syntax/data_types/references.md new file mode 100644 index 00000000000..a5293d11cfb --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/references.md @@ -0,0 +1,23 @@ +--- +title: References +sidebar_position: 9 +--- + +Noir supports first-class references. References are a bit like pointers: they point to a specific address that can be followed to access the data stored at that address. You can use Rust-like syntax to use pointers in Noir: the `&` operator references the variable, the `*` operator dereferences it. 
+ +Example: + +```rust +fn main() { + let mut x = 2; + + // you can reference x as &mut and pass it to multiplyBy2 + multiplyBy2(&mut x); +} + +// you can access &mut here +fn multiplyBy2(x: &mut Field) { + // and dereference it with * + *x = *x * 2; +} +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/slices.mdx b/docs/versioned_docs/version-v../noir/syntax/data_types/slices.mdx new file mode 100644 index 00000000000..4a6ee816aa2 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/slices.mdx @@ -0,0 +1,147 @@ +--- +title: Slices +description: Explore the Slice data type in Noir. Understand its methods, see real-world examples, and learn how to effectively use Slices in your Noir programs. +keywords: [noir, slice type, methods, examples, subarrays] +sidebar_position: 5 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A slice is a dynamically-sized view into a sequence of elements. They can be resized at runtime, but because they don't own the data, they cannot be returned from a circuit. You can treat slices as arrays without a constrained size. + +```rust +use dep::std::slice; + +fn main() -> pub Field { + let mut slice: [Field] = [0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +[test-file]: https://github.com/noir-lang/noir/blob/f387ec1475129732f72ba294877efdf6857135ac/crates/nargo_cli/tests/test_data_ssa_refactor/slices/src/main.nr + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for slices: + +### push_back + +Pushes a new element to the end of the slice, returning a new slice with a length one greater than the original unmodified slice. + +```rust +fn push_back(_self: [T], _elem: T) -> [T] +``` + +example: + +```rust +fn main() -> pub Field { + let mut slice: [Field] = [0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +### push_front + +Returns a new array with the specified element inserted at index 0. The existing elements indexes are incremented by 1. + +```rust +fn push_front(_self: Self, _elem: T) -> Self +``` + +Example: + +```rust +let mut new_slice: [Field] = []; +new_slice = new_slice.push_front(20); +assert(new_slice[0] == 20); // returns true +``` + +View the corresponding test file [here][test-file]. + +### pop_front + +Returns a tuple of two items, the first element of the array and the rest of the array. + +```rust +fn pop_front(_self: Self) -> (T, Self) +``` + +Example: + +```rust +let (first_elem, rest_of_slice) = slice.pop_front(); +``` + +View the corresponding test file [here][test-file]. + +### pop_back + +Returns a tuple of two items, the beginning of the array with the last element omitted and the last element. + +```rust +fn pop_back(_self: Self) -> (Self, T) +``` + +Example: + +```rust +let (popped_slice, last_elem) = slice.pop_back(); +``` + +View the corresponding test file [here][test-file]. + +### append + +Loops over a slice and adds it to the end of another. + +```rust +fn append(mut self, other: Self) -> Self +``` + +Example: + +```rust +let append = [1, 2].append([3, 4, 5]); +``` + +### insert + +Inserts an element at a specified index and shifts all following elements by 1. 
+ +```rust +fn insert(_self: Self, _index: Field, _elem: T) -> Self +``` + +Example: + +```rust +new_slice = rest_of_slice.insert(2, 100); +assert(new_slice[2] == 100); +``` + +View the corresponding test file [here][test-file]. + +### remove + +Remove an element at a specified index, shifting all elements after it to the left, returning the altered slice and the removed element. + +```rust +fn remove(_self: Self, _index: Field) -> (Self, T) +``` + +Example: + +```rust +let (remove_slice, removed_elem) = slice.remove(3); +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/strings.md b/docs/versioned_docs/version-v../noir/syntax/data_types/strings.md new file mode 100644 index 00000000000..8d76d4ca654 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/strings.md @@ -0,0 +1,80 @@ +--- +title: Strings +description: + Discover the String data type in Noir. Learn about its methods, see real-world examples, and understand how to effectively manipulate and use Strings in Noir. +keywords: + [ + noir, + string type, + methods, + examples, + concatenation, + ] +sidebar_position: 3 +--- + + +The string type is a fixed length value defined with `str`. + +You can use strings in `assert()` functions or print them with +`std::println()`. See more about [Logging](../../standard_library/logging). + +```rust +use dep::std; + +fn main(message : pub str<11>, hex_as_string : str<4>) { + std::println(message); + assert(message == "hello world"); + assert(hex_as_string == "0x41"); +} +``` + +You can convert a `str` to a byte array by calling `as_bytes()` +or a vector by calling `as_bytes_vec()`. + +```rust +fn main() { + let message = "hello world"; + let message_bytes = message.as_bytes(); + let mut message_vec = message.as_bytes_vec(); + assert(message_bytes.len() == 11); + assert(message_bytes[0] == 104); + assert(message_bytes[0] == message_vec.get(0)); +} +``` + +## Escape characters + +You can use escape characters for your strings: + +| Escape Sequence | Description | +|-----------------|-----------------| +| `\r` | Carriage Return | +| `\n` | Newline | +| `\t` | Tab | +| `\0` | Null Character | +| `\"` | Double Quote | +| `\\` | Backslash | + +Example: + +```rust +let s = "Hello \"world" // prints "Hello "world" +let s = "hey \tyou"; // prints "hey you" +``` + +## Raw strings + +A raw string begins with the letter `r` and is optionally delimited by a number of hashes `#`. + +Escape characters are *not* processed within raw strings. All contents are interpreted literally. + +Example: + +```rust +let s = r"Hello world"; +let s = r#"Simon says "hello world""#; + +// Any number of hashes may be used (>= 1) as long as the string also terminates with the same number of hashes +let s = r#####"One "#, Two "##, Three "###, Four "####, Five will end the string."#####; +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/structs.md b/docs/versioned_docs/version-v../noir/syntax/data_types/structs.md new file mode 100644 index 00000000000..dbf68c99813 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/data_types/structs.md @@ -0,0 +1,70 @@ +--- +title: Structs +description: + Explore the Struct data type in Noir. Learn about its methods, see real-world examples, and grasp how to effectively define and use Structs in your Noir programs. +keywords: + [ + noir, + struct type, + methods, + examples, + data structures, + ] +sidebar_position: 8 +--- + +A struct also allows for grouping multiple values of different types. 
+Unlike tuples, we can also
+name each field.
+
+> **Note:** The usage of _field_ here refers to each element of the struct and is unrelated to the
+> field type of Noir.
+
+Defining a struct requires giving it a name and listing each field within as `<Key>: <Type>` pairs:
+
+```rust
+struct Animal {
+    hands: Field,
+    legs: Field,
+    eyes: u8,
+}
+```
+
+An instance of a struct can then be created with actual values in `<Key>: <Value>` pairs in any
+order. Struct fields are accessible using their given names:
+
+```rust
+fn main() {
+    let legs = 4;
+
+    let dog = Animal {
+        eyes: 2,
+        hands: 0,
+        legs,
+    };
+
+    let zero = dog.hands;
+}
+```
+
+Structs can also be destructured in a pattern, binding each field to a new variable:
+
+```rust
+fn main() {
+    let Animal { hands, legs: feet, eyes } = get_octopus();
+
+    let ten = hands + feet + eyes as u8;
+}
+
+fn get_octopus() -> Animal {
+    let octopus = Animal {
+        hands: 0,
+        legs: 8,
+        eyes: 2,
+    };
+
+    octopus
+}
+```
+
+The new variables can be bound with names different from the original struct field names, as
+showcased in the `legs --> feet` binding in the example above.
diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/tuples.md b/docs/versioned_docs/version-v../noir/syntax/data_types/tuples.md
new file mode 100644
index 00000000000..2ec5c9c4113
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/data_types/tuples.md
@@ -0,0 +1,48 @@
+---
+title: Tuples
+description:
+  Dive into the Tuple data type in Noir. Understand its methods, practical examples, and best practices for efficiently using Tuples in your Noir code.
+keywords:
+  [
+    noir,
+    tuple type,
+    methods,
+    examples,
+    multi-value containers,
+  ]
+sidebar_position: 7
+---
+
+A tuple collects multiple values like an array, but with the added ability to collect values of
+different types:
+
+```rust
+fn main() {
+    let tup: (u8, u64, Field) = (255, 500, 1000);
+}
+```
+
+One way to access tuple elements is via destructuring using pattern matching:
+
+```rust
+fn main() {
+    let tup = (1, 2);
+
+    let (one, two) = tup;
+
+    let three = one + two;
+}
+```
+
+Another way to access tuple elements is via direct member access, using a period (`.`) followed by
+the index of the element we want to access. Index `0` corresponds to the first tuple element, `1` to
+the second and so on:
+
+```rust
+fn main() {
+    let tup = (5, 6, 7, 8);
+
+    let five = tup.0;
+    let eight = tup.3;
+}
+```
diff --git a/docs/versioned_docs/version-v../noir/syntax/data_types/vectors.mdx b/docs/versioned_docs/version-v../noir/syntax/data_types/vectors.mdx
new file mode 100644
index 00000000000..10e35711b74
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/data_types/vectors.mdx
@@ -0,0 +1,173 @@
+---
+title: Vectors
+description: Delve into the Vector data type in Noir. Learn about its methods, practical examples, and best practices for using Vectors in your Noir code.
+keywords: [noir, vector type, methods, examples, dynamic arrays]
+sidebar_position: 6
+---
+
+import Experimental from '@site/src/components/Notes/_experimental.mdx';
+
+<Experimental />
+
+A vector is a collection type similar to Rust's Vector type. It's a convenient way to use slices as mutable arrays.
+
+Example:
+
+```rust
+use dep::std::collections::vec::Vec;
+
+let mut vector: Vec<Field> = Vec::new();
+for i in 0..5 {
+    vector.push(i);
+}
+assert(vector.len() == 5);
+```
+
+## Methods
+
+### new
+
+Creates a new, empty vector.
+ +```rust +pub fn new() -> Self { + Self { slice: [] } +} +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### from_slice + +Creates a vector containing each element from a given slice. Mutations to the resulting vector will not affect the original slice. + +```rust +pub fn from_slice(slice: [T]) -> Self { + Self { slice } +} +``` + +Example: + +```rust +let arr: [Field] = [1, 2, 3]; +let vector_from_slice = Vec::from_slice(arr); +assert(vector_from_slice.len() == 3); +``` + +### get + +Retrieves an element from the vector at a given index. Panics if the index points beyond the vector's end. + +```rust +pub fn get(self, index: Field) -> T { + self.slice[index] +} +``` + +Example: + +```rust +let vector: Vec = Vec::from_slice([10, 20, 30]); +assert(vector.get(1) == 20); +``` + +### push + +Adds a new element to the vector's end, returning a new vector with a length one greater than the original unmodified vector. + +```rust +pub fn push(&mut self, elem: T) { + self.slice = self.slice.push_back(elem); +} +``` + +Example: + +```rust +let mut vector: Vec = Vec::new(); +vector.push(10); +assert(vector.len() == 1); +``` + +### pop + +Removes an element from the vector's end, returning a new vector with a length one less than the original vector, along with the removed element. Panics if the vector's length is zero. + +```rust +pub fn pop(&mut self) -> T { + let (popped_slice, last_elem) = self.slice.pop_back(); + self.slice = popped_slice; + last_elem +} +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 20]); +let popped_elem = vector.pop(); +assert(popped_elem == 20); +assert(vector.len() == 1); +``` + +### insert + +Inserts an element at a specified index, shifting subsequent elements to the right. + +```rust +pub fn insert(&mut self, index: Field, elem: T) { + self.slice = self.slice.insert(index, elem); +} +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 30]); +vector.insert(1, 20); +assert(vector.get(1) == 20); +``` + +### remove + +Removes an element at a specified index, shifting subsequent elements to the left, and returns the removed element. + +```rust +pub fn remove(&mut self, index: Field) -> T { + let (new_slice, elem) = self.slice.remove(index); + self.slice = new_slice; + elem +} +``` + +Example: + +```rust +let mut vector = Vec::from_slice([10, 20, 30]); +let removed_elem = vector.remove(1); +assert(removed_elem == 20); +assert(vector.len() == 2); +``` + +### len + +Returns the number of elements in the vector. + +```rust +pub fn len(self) -> Field { + self.slice.len() +} +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/distinct.md b/docs/versioned_docs/version-v../noir/syntax/distinct.md new file mode 100644 index 00000000000..b59e0296b23 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/distinct.md @@ -0,0 +1,64 @@ +--- +title: Distinct Witnesses +sidebar_position: 10 +--- + +The `distinct` keyword prevents repetitions of witness indices in the program's ABI. This ensures +that the witnesses being returned as public inputs are all unique. + +The `distinct` keyword is only used for return values on program entry points (usually the `main()` +function). + +When using `distinct` and `pub` simultaneously, `distinct` comes first. See the example below. + +You can read more about the problem this solves +[here](https://github.com/noir-lang/noir/issues/1183). 
+ +## Example + +Without the `distinct` keyword, the following program + +```rust +fn main(x : pub Field, y : pub Field) -> pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + "return_witnesses": [3, 2, 4, 4] + } +} +``` + +Whereas (with the `distinct` keyword) + +```rust +fn main(x : pub Field, y : pub Field) -> distinct pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + //... + "return_witnesses": [3, 4, 5, 6] + } +} +``` diff --git a/docs/versioned_docs/version-v../noir/syntax/functions.md b/docs/versioned_docs/version-v../noir/syntax/functions.md new file mode 100644 index 00000000000..48aba9cd058 --- /dev/null +++ b/docs/versioned_docs/version-v../noir/syntax/functions.md @@ -0,0 +1,226 @@ +--- +title: Functions +description: + Learn how to declare functions and methods in Noir, a programming language with Rust semantics. + This guide covers parameter declaration, return types, call expressions, and more. +keywords: [Noir, Rust, functions, methods, parameter declaration, return types, call expressions] +sidebar_position: 1 +--- + +Functions in Noir follow the same semantics of Rust, though Noir does not support early returns. + +To declare a function the `fn` keyword is used. + +```rust +fn foo() {} +``` + +By default, functions are visible only within the package they are defined. To make them visible outside of that package (for example, as part of a [library](../modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: + +```rust +pub fn foo() {} +``` + +You can also restrict the visibility of the function to only the crate it was defined in, by specifying `pub(crate)`: + +```rust +pub(crate) fn foo() {} //foo can only be called within its crate +``` + +All parameters in a function must have a type and all types are known at compile time. The parameter +is pre-pended with a colon and the parameter type. Multiple parameters are separated using a comma. + +```rust +fn foo(x : Field, y : Field){} +``` + +The return type of a function can be stated by using the `->` arrow notation. The function below +states that the foo function must return a `Field`. If the function returns no value, then the arrow +is omitted. + +```rust +fn foo(x : Field, y : Field) -> Field { + x + y +} +``` + +Note that a `return` keyword is unneeded in this case - the last expression in a function's body is +returned. + +## Main function + +If you're writing a binary, the `main` function is the starting point of your program. You can pass all types of expressions to it, as long as they have a fixed size at compile time: + +```rust +fn main(x : Field) // this is fine: passing a Field +fn main(x : [Field; 2]) // this is also fine: passing a Field with known size at compile-time +fn main(x : (Field, bool)) // 👌: passing a (Field, bool) tuple means size 2 +fn main(x : str<5>) // this is fine, as long as you pass a string of size 5 + +fn main(x : Vec) // can't compile, has variable size +fn main(x : [Field]) // can't compile, has variable size +fn main(....// i think you got it by now +``` + +Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. 
+The following snippet passes tests, but won't compile or prove:
+
+```rust
+fn main(x : [Field]) {
+    assert(x[0] == 1);
+}
+
+#[test]
+fn test_one() {
+    main([1, 2]);
+}
+```
+
+```bash
+$ nargo test
+[testing] Running 1 test functions
+[testing] Testing test_one... ok
+[testing] All tests passed

+$ nargo check
+The application panicked (crashed).
+Message: Cannot have variable sized arrays as a parameter to main
+```
+
+## Call Expressions
+
+Calling a function in Noir is done by using the function name and passing in the necessary
+arguments.
+
+Below we show how to call the `foo` function from the `main` function using a call expression:
+
+```rust
+fn main(x : Field, y : Field) {
+    let z = foo(x);
+}
+
+fn foo(x : Field) -> Field {
+    x + x
+}
+```
+
+## Methods
+
+You can define methods in Noir on any struct type in scope.
+
+```rust
+struct MyStruct {
+    foo: Field,
+    bar: Field,
+}
+
+impl MyStruct {
+    fn new(foo: Field) -> MyStruct {
+        MyStruct {
+            foo,
+            bar: 2,
+        }
+    }
+
+    fn sum(self) -> Field {
+        self.foo + self.bar
+    }
+}
+
+fn main() {
+    let s = MyStruct::new(40);
+    assert(s.sum() == 42);
+}
+```
+
+Methods are just syntactic sugar for functions, so if we wanted to we could also call `sum` as
+follows:
+
+```rust
+assert(MyStruct::sum(s) == 42);
+```
+
+It is also possible to specialize which method is chosen depending on the [generic](./generics.md) type that is used. In this example, the `foo` function returns different values depending on its type:
+
+```rust
+struct Foo<T> {}
+
+impl Foo<Field> {
+    fn foo(self) -> Field { 1 }
+}
+
+impl Foo<u32> {
+    fn foo(self) -> Field { 2 }
+}
+
+fn main() {
+    let f1: Foo<Field> = Foo{};
+    let f2: Foo<u32> = Foo{};
+    assert(f1.foo() + f2.foo() == 3);
+}
+```
+
+Also note that impls with the same method name defined in them cannot overlap. For example, if we already have `foo` defined for `Foo<Field>` and `Foo<u32>` like we do above, we cannot also define `foo` in an `impl<T> Foo<T>` since it would be ambiguous which version of `foo` to choose.
+
+```rust
+// Including this impl in the same project as the above snippet would
+// cause an overlapping impls error
+impl<T> Foo<T> {
+    fn foo(self) -> Field { 3 }
+}
+```
+
+## Lambdas
+
+Lambdas are anonymous functions. They follow the syntax of Rust - `|arg1, arg2, ..., argN| return_expression`.
+
+```rust
+let add_50 = |val| val + 50;
+assert(add_50(100) == 150);
+```
+
+See [Lambdas](./lambdas.md) for more details.
+
+## Attributes
+
+Attributes are metadata that can be applied to a function, using the following syntax: `#[attribute(value)]`.
+
+Supported attributes include:
+
+- **builtin**: the function is implemented by the compiler, for efficiency purposes.
+- **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function`
+- **field**: Used to enable conditional compilation of code depending on the field size. See below for more details
+- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details.
+- **test**: mark the function as a unit test. See [Tests](../../getting_started/tooling/testing.md) for more details
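+
+For instance, attributes are placed directly above the function they apply to. A minimal sketch (the function names are just for illustration):
+
+```rust
+#[deprecated]
+fn old_helper(x: Field) -> Field {
+    x + 1
+}
+
+#[test]
+fn test_old_helper() {
+    // Calling a deprecated function still works, but the compiler emits a warning.
+    assert(old_helper(1) == 2);
+}
+```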
+
+### Field Attribute
+
+The field attribute defines which field the function is compatible with. The function is conditionally compiled, under the condition that the field attribute matches the Noir native field.
+The field can be defined implicitly, by using the name of the elliptic curve usually associated to it - for instance bn254, bls12_381 - or explicitly by using the field (prime) order, in decimal or hexadecimal form.
+As a result, it is possible to define multiple versions of a function with each version specialized for a different field attribute. This can be useful when a function requires different parameters depending on the underlying elliptic curve.
+
+Example: we define the function `foo()` three times below. Once for the default Noir bn254 curve, once for the field $\mathbb F_{23}$, which will normally never be used by Noir, and once again for the bls12_381 curve.
+
+```rust
+#[field(bn254)]
+fn foo() -> u32 {
+    1
+}
+
+#[field(23)]
+fn foo() -> u32 {
+    2
+}
+
+// This commented code would not compile as foo would be defined twice because it is the same field as bn254
+// #[field(21888242871839275222246405745257275088548364400416034343698204186575808495617)]
+// fn foo() -> u32 {
+//     2
+// }
+
+#[field(bls12_381)]
+fn foo() -> u32 {
+    3
+}
+```
+
+If the field name is not known to Noir, it will discard the function. Field names are case insensitive.
diff --git a/docs/versioned_docs/version-v../noir/syntax/generics.md b/docs/versioned_docs/version-v../noir/syntax/generics.md
new file mode 100644
index 00000000000..443ca2b45a5
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/generics.md
@@ -0,0 +1,114 @@
+---
+title: Generics
+description: Learn how to use Generics in Noir
+keywords: [Noir, Rust, generics, functions, structs]
+sidebar_position: 6
+---
+
+Generics allow you to use the same functions with multiple different concrete data types. You can
+read more about the concept of generics in the Rust documentation
+[here](https://doc.rust-lang.org/book/ch10-01-syntax.html).
+
+Here is a trivial example showing the identity function that supports any type. In Rust, it is
+common to refer to the most general type as `T`. We follow the same convention in Noir.
+
+```rust
+fn id<T>(x: T) -> T {
+    x
+}
+```
+
+## In Structs
+
+Generics are useful for specifying types in structs. For example, we can specify that a field in a
+struct will be of a certain generic type. In this case `value` is of type `T`.
+
+```rust
+struct RepeatedValue<T> {
+    value: T,
+    count: Field,
+}
+
+impl<T> RepeatedValue<T> {
+    fn new(value: T) -> Self {
+        Self { value, count: 1 }
+    }
+
+    fn increment(mut repeated: Self) -> Self {
+        repeated.count += 1;
+        repeated
+    }
+
+    fn print(self) {
+        for _i in 0 .. self.count {
+            dep::std::println(self.value);
+        }
+    }
+}
+
+fn main() {
+    let mut repeated = RepeatedValue::new("Hello!");
+    repeated = repeated.increment();
+    repeated.print();
+}
+```
+
+The `print` function will print `Hello!` an arbitrary number of times, twice in this case.
+
+If we want to be generic over array lengths (which are type-level integers), we can use numeric
+generics. Using these looks just like using regular generics, but these generics can resolve to
+integers at compile-time, rather than resolving to types.
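+
+For instance, a function can be generic over an array's length. A minimal sketch (the `first_element` helper is just for illustration):
+
+```rust
+fn first_element<N>(array: [Field; N]) -> Field {
+    // `N` resolves to the length of whatever array is passed in at compile-time.
+    array[0]
+}
+
+fn main() {
+    assert(first_element([1, 2, 3]) == 1);
+    assert(first_element([7; 10]) == 7);
+}
+```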
+Here's an example of a struct that is
+generic over the size of the array it contains internally:
+
+```rust
+struct BigInt<N> {
+    limbs: [u32; N],
+}
+
+impl<N> BigInt<N> {
+    // `N` is in scope of all methods in the impl
+    fn first(first: BigInt<N>, second: BigInt<N>) -> Self {
+        assert(first.limbs != second.limbs);
+        first
+    }
+
+    fn second(first: BigInt<N>, second: Self) -> Self {
+        assert(first.limbs != second.limbs);
+        second
+    }
+}
+```
+
+## Calling functions on generic parameters
+
+Unlike Rust, Noir does not have traits, so how can one translate the equivalent of a trait bound in
+Rust into Noir? That is, how can we write a function that is generic over some type `T`, while also
+requiring there is a function like `eq: fn(T, T) -> bool` that works on the type?
+
+The answer is that we can translate this by passing in the function manually. Here's an example of
+implementing array equality in Noir:
+
+```rust
+fn array_eq<T, N>(array1: [T; N], array2: [T; N], elem_eq: fn(T, T) -> bool) -> bool {
+    if array1.len() != array2.len() {
+        false
+    } else {
+        let mut result = true;
+        for i in 0 .. array1.len() {
+            result &= elem_eq(array1[i], array2[i]);
+        }
+        result
+    }
+}
+
+fn main() {
+    assert(array_eq([1, 2, 3], [1, 2, 3], |a, b| a == b));
+
+    // We can use array_eq even for arrays of structs, as long as we have
+    // an equality function for these structs we can pass in
+    let array = [MyStruct::new(), MyStruct::new()];
+    assert(array_eq(array, array, MyStruct::eq));
+}
+```
+
+You can see an example of generics in the tests
+[here](https://github.com/noir-lang/noir/blob/master/tooling/nargo_cli/tests/execution_success/generics/src/main.nr).
diff --git a/docs/versioned_docs/version-v../noir/syntax/lambdas.md b/docs/versioned_docs/version-v../noir/syntax/lambdas.md
new file mode 100644
index 00000000000..e0a267adfda
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/lambdas.md
@@ -0,0 +1,81 @@
+---
+title: Lambdas
+description: Learn how to use anonymous functions in Noir programming language.
+keywords: [Noir programming language, lambda, closure, function, anonymous function]
+sidebar_position: 8
+---
+
+## Introduction
+
+Lambdas are anonymous functions. The syntax is `|arg1, arg2, ..., argN| return_expression`.
+
+```rust
+let add_50 = |val| val + 50;
+assert(add_50(100) == 150);
+```
+
+A block can be used as the body of a lambda, allowing you to declare local variables inside it:
+
+```rust
+let cool = || {
+    let x = 100;
+    let y = 100;
+    x + y
+};
+
+assert(cool() == 200);
+```
+
+## Closures
+
+Inside the body of a lambda, you can use variables defined in the enclosing function. Such lambdas are called **closures**. In this example `x` is defined inside `main` and is accessed from within the lambda:
+
+```rust
+fn main() {
+    let x = 100;
+    let closure = || x + 150;
+    assert(closure() == 250);
+}
+```
+
+## Passing closures to higher-order functions
+
+It may catch you by surprise that the following code fails to compile:
+
+```rust
+fn foo(f: fn () -> Field) -> Field {
+    f()
+}
+
+fn main() {
+    let (x, y) = (50, 50);
+    assert(foo(|| x + y) == 100); // error :(
+}
+```
+
+The reason is that the closure's capture environment affects its type - we have a closure that captures two Fields and `foo`
+expects a regular function as an argument - those are incompatible.
+:::note
+
+Variables contained within the `||` are the closure's parameters, and the expression that follows it is the closure's body.
+The capture environment is comprised of any variables used in the closure's body that are not parameters.
+
+E.g. in `|x| x + y`, `y` would be a captured variable, but `x` would not be, since it is a parameter of the closure.
+
+:::
+The syntax for the type of a closure is `fn[env](args) -> ret_type`, where `env` is the capture environment of the closure -
+in this example that's `(Field, Field)`.
+
+The best solution in our case is to make `foo` generic over the environment type of its parameter, so that it can be called
+with closures with any environment, as well as with regular functions:
+
+```rust
+fn foo<Env>(f: fn[Env]() -> Field) -> Field {
+    f()
+}
+
+fn main() {
+    let (x, y) = (50, 50);
+    assert(foo(|| x + y) == 100); // compiles fine
+    assert(foo(|| 60) == 60);     // compiles fine
+}
+```
diff --git a/docs/versioned_docs/version-v../noir/syntax/mutability.md b/docs/versioned_docs/version-v../noir/syntax/mutability.md
new file mode 100644
index 00000000000..58e9c1cecfb
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/mutability.md
@@ -0,0 +1,93 @@
+---
+title: Mutability
+description:
+  Learn about mutable variables, constants, and globals in Noir programming language. Discover how
+  to declare, modify, and use them in your programs.
+keywords: [noir programming language, mutability in noir, mutable variables, constants, globals]
+sidebar_position: 7
+---
+
+Variables in Noir can be declared mutable via the `mut` keyword. Mutable variables can be reassigned
+to via an assignment expression.
+
+```rust
+let x = 2;
+x = 3; // error: x must be mutable to be assigned to
+
+let mut y = 3;
+let y = 4; // OK
+```
+
+The `mut` modifier can also apply to patterns:
+
+```rust
+let (a, mut b) = (1, 2);
+a = 11; // error: a must be mutable to be assigned to
+b = 12; // OK
+
+let mut (c, d) = (3, 4);
+c = 13; // OK
+d = 14; // OK
+
+// etc.
+let MyStruct { x: mut y } = MyStruct { x: a };
+// y is now in scope
+```
+
+Note that mutability in Noir is local and everything is passed by value, so if a called function
+mutates its parameters then the parent function will keep the old value of the parameters.
+
+```rust
+fn main() -> pub Field {
+    let x = 3;
+    helper(x);
+    x // x is still 3
+}
+
+fn helper(mut x: Field) {
+    x = 4;
+}
+```
+
+## Comptime Values
+
+:::warning
+
+The 'comptime' keyword was removed in version 0.10. The comptime keyword and syntax are currently still kept and parsed for backwards compatibility, but are now deprecated and will issue a warning when used. `comptime` has been removed because it is no longer needed for accessing arrays.
+
+:::
+
+## Globals
+
+Noir also supports global variables. However, they must be known at compile-time. The global type can also be inferred by the compiler entirely. Globals can also be used to specify array
+annotations for function parameters and can be imported from submodules.
+
+```rust
+global N: Field = 5; // Same as `global N = 5`, since the type can be inferred
+
+fn main(x : Field, y : [Field; N]) {
+    let res = x * N;
+
+    assert(res == y[0]);
+
+    let res2 = x * mysubmodule::N;
+    assert(res != res2);
+}
+
+mod mysubmodule {
+    use dep::std;
+
+    global N: Field = 10;
+
+    fn my_helper() -> Field {
+        let x = N;
+        x
+    }
+}
+```
+
+## Why only local mutability?
+
+Witnesses in a proving system are immutable in nature. Noir aims to _closely_ mirror this setting
+without applying additional overhead to the user. Modeling a mutable reference is not as
+straightforward as on conventional architectures and would incur some possibly unexpected overhead.
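+
+If a caller does need to observe a mutation, it can opt in explicitly by passing a mutable reference with `&mut` (see the References page). A minimal sketch, with illustrative function names:
+
+```rust
+fn main() {
+    let mut x = 3;
+    double_in_place(&mut x); // the caller opts in to mutation explicitly
+    assert(x == 6);
+
+    let y = 3;
+    add_one_by_value(y); // `y` is copied, so the caller's value is untouched
+    assert(y == 3);
+}
+
+fn double_in_place(x: &mut Field) {
+    *x = *x * 2;
+}
+
+fn add_one_by_value(mut x: Field) {
+    x = x + 1;
+}
+```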
diff --git a/docs/versioned_docs/version-v../noir/syntax/ops.md b/docs/versioned_docs/version-v../noir/syntax/ops.md
new file mode 100644
index 00000000000..977c8ba1203
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/ops.md
@@ -0,0 +1,98 @@
+---
+title: Logical Operations
+description:
+  Learn about the supported arithmetic and logical operations in the Noir programming language.
+  Discover how to perform operations on private input types, integers, and booleans.
+keywords:
+  [
+    Noir programming language,
+    supported operations,
+    arithmetic operations,
+    logical operations,
+    predicate operators,
+    bitwise operations,
+    short-circuiting,
+    backend,
+  ]
+sidebar_position: 3
+---
+
+# Operations
+
+## Table of Supported Operations
+
+| Operation | Description | Requirements |
+| :-------- | :------------------------------------------------------------: | -------------------------------------: |
+| + | Adds two private input types together | Types must be private input |
+| - | Subtracts two private input types together | Types must be private input |
+| \* | Multiplies two private input types together | Types must be private input |
+| / | Divides two private input types together | Types must be private input |
+| ^ | XOR two private input types together | Types must be integer |
+| & | AND two private input types together | Types must be integer |
+| \| | OR two private input types together | Types must be integer |
+| \<\< | Left shift an integer by another integer amount | Types must be integer |
+| >> | Right shift an integer by another integer amount | Types must be integer |
+| ! | Bitwise not of a value | Type must be integer or boolean |
+| \< | Returns a bool if one value is less than the other | Upper bound must have a known bit size |
+| \<= | Returns a bool if one value is less than or equal to the other | Upper bound must have a known bit size |
+| > | Returns a bool if one value is more than the other | Upper bound must have a known bit size |
+| >= | Returns a bool if one value is more than or equal to the other | Upper bound must have a known bit size |
+| == | Returns a bool if one value is equal to the other | Both types must not be constants |
+| != | Returns a bool if one value is not equal to the other | Both types must not be constants |
+
+### Predicate Operators
+
+`<, <=, !=, ==, >, >=` are known as predicate/comparison operations because they compare two values.
+This differs from operations such as `+`, where the operands are used in _computation_.
+
+### Bitwise Operations Example
+
+```rust
+fn main(x : Field) {
+    let y = x as u32;
+    let z = y & y;
+}
+```
+
+`z` is implicitly constrained to be the result of `y & y`. The `&` operator is used to denote bitwise
+`&`.
+
+> `x & x` would not compile as `x` is a `Field` and not an integer type.
+
+### Logical Operators
+
+Noir has no support for the logical operators `||` and `&&`. This is because encoding the
+short-circuiting that these operators require can be inefficient for Noir's backend. Instead you can
+use the bitwise operators `|` and `&` which operate identically for booleans, just without the
+short-circuiting.
+
+```rust
+let my_val = 5;
+
+let mut flag = 1;
+if (my_val > 6) | (my_val == 0) {
+    flag = 0;
+}
+assert(flag == 1);
+
+if (my_val != 10) & (my_val < 50) {
+    flag = 0;
+}
+assert(flag == 0);
+```
+
+### Shorthand operators
+
+Noir has shorthand operators for most of the above operators, namely `+=, -=, *=, /=, %=, &=, |=, ^=, <<=`, and `>>=`. These allow for more concise syntax.
+For example:
+
+```rust
+let mut i = 0;
+i = i + 1;
+```
+
+could be written as:
+
+```rust
+let mut i = 0;
+i += 1;
+```
diff --git a/docs/versioned_docs/version-v../noir/syntax/shadowing.md b/docs/versioned_docs/version-v../noir/syntax/shadowing.md
new file mode 100644
index 00000000000..b5a6b6b38b9
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/shadowing.md
@@ -0,0 +1,44 @@
+---
+title: Shadowing
+sidebar_position: 11
+---
+
+Noir allows for re-declaring a variable with the same name, inheriting the previous variable's value, similar to Rust. This is known as shadowing.
+
+For example, the following function is valid in Noir:
+
+```rust
+fn main() {
+    let x = 5;
+
+    {
+        let x = x * 2;
+        assert (x == 10);
+    }
+
+    assert (x == 5);
+}
+```
+
+In this example, a variable `x` is first defined with the value 5.
+
+The local scope that follows shadows the original `x`, i.e. creates a new local `x` based on the value of the original `x`. It is given a value of 2 times the original `x`.
+
+When we return to the main scope, `x` once again refers to just the original `x`, which stays at the value of 5.
+
+## Temporal mutability
+
+One way that shadowing is useful, in addition to ergonomics across scopes, is for temporarily mutating variables.
+
+```rust
+fn main() {
+    let age = 30;
+    // age = age + 5; // Would error as `age` is immutable by default.
+
+    let mut age = age + 5; // Temporarily mutates `age` with a new value.
+
+    let age = age; // Locks `age`'s mutability again.
+
+    assert (age == 35);
+}
+```
diff --git a/docs/versioned_docs/version-v../noir/syntax/unconstrained.md b/docs/versioned_docs/version-v../noir/syntax/unconstrained.md
new file mode 100644
index 00000000000..7a61d3953ef
--- /dev/null
+++ b/docs/versioned_docs/version-v../noir/syntax/unconstrained.md
@@ -0,0 +1,95 @@
+---
+title: Unconstrained Functions
+description: "Learn about what unconstrained functions in Noir are, how to use them and when you'd want to."
+
+keywords: [Noir programming language, unconstrained, open]
+sidebar_position: 5
+---
+
+Unconstrained functions are functions which do not constrain any of the included computation and allow for non-deterministic computation.
+
+## Why?
+
+Zero-knowledge (ZK) domain-specific languages (DSL) enable developers to generate ZK proofs from their programs by compiling code down to the constraints of an NP-complete language (such as R1CS or PLONKish languages). However, the hard bounds of a constraint system can be very limiting to the functionality of a ZK DSL.
+
+Enabling a circuit language to perform unconstrained execution is a powerful tool. Said another way, unconstrained execution lets developers generate witnesses from code that does not generate any constraints. Being able to execute logic outside of a circuit is critical for both circuit performance and constructing proofs on information that is external to a circuit.
+
+Fetching information from somewhere external to a circuit can also be used to enable developers to improve circuit efficiency.
+
+A ZK DSL does not just prove computation, but proves that some computation was handled correctly. Thus, it is necessary that when we switch from performing some operation directly inside of a circuit to inside of an unconstrained environment that the appropriate constraints are still laid down elsewhere in the circuit.
+
+## Example
+
+An in-depth example might help drive the point home.
+This example comes from the excellent [post](https://discord.com/channels/1113924620781883405/1124022445054111926/1128747641853972590) by Tom in the Noir Discord.
+
+Let's look at how we can optimize a function to turn a `u72` into an array of `u8`s.
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8)) as u72 & 0xff) as u8;
+    }
+
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 91
+Backend circuit size: 3619
+```
+
+A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However, we can save a bunch of gates by casting to u8 a bit earlier. This automatically truncates the bit-shifted value to fit in a u8, which allows us to remove the AND against 0xff. This saves us ~480 gates in total.
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8))) as u8;
+    }
+
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 75
+Backend circuit size: 3143
+```
+
+Those are some nice savings already, but we can do better. This code is all constrained, so we're proving every step of calculating `out` using `num`, but we don't actually care about how we calculate this, just that it's correct. This is where brillig comes in.
+
+It turns out that truncating a u72 into a u8 is hard to do inside a snark: each time we do `as u8` we lay down 4 ACIR opcodes, which get converted into multiple gates. It's actually much easier to calculate `num` from `out` than the other way around. All we need to do is multiply each element of `out` by a constant and add them all together, both relatively easy operations inside a snark.
+
+We can then run `u72_to_u8` as unconstrained brillig code in order to calculate `out`, then use that result in our constrained function and assert that if we were to do the reverse calculation we'd get back `num`. This looks a little like the below:
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let out = u72_to_u8(num);
+
+    let mut reconstructed_num: u72 = 0;
+    for i in 0..8 {
+        reconstructed_num += (out[i] as u72 << (56 - (8 * i)));
+    }
+    assert(num == reconstructed_num);
+    out
+}
+
+unconstrained fn u72_to_u8(num: u72) -> [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8))) as u8;
+    }
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 78
+Backend circuit size: 2902
+```
+
+This ends up taking off another ~250 gates from our circuit! We've ended up with more ACIR opcodes than before, but they're easier for the backend to prove (resulting in fewer gates).
+
+Generally we want to use brillig whenever there's something that's easy to verify but hard to compute within the circuit. For example, if you wanted to calculate a square root of a number, it'll be a much better idea to calculate this in brillig and then assert that if you square the result you get back your number.
diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/.nojekyll b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/.nojekyll
new file mode 100644
index 00000000000..e2ac6616add
--- /dev/null
+++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/.nojekyll
@@ -0,0 +1 @@
+TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false.
\ No newline at end of file diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md new file mode 100644 index 00000000000..5cbe9421b92 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md @@ -0,0 +1,185 @@ +# BarretenbergBackend + +## Implements + +- [`Backend`](../interfaces/Backend.md) + +## Constructors + +### new BarretenbergBackend(acirCircuit, options) + +```ts +new BarretenbergBackend(acirCircuit, options): BarretenbergBackend +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `acirCircuit` | [`CompiledCircuit`](../type-aliases/CompiledCircuit.md) | +| `options` | [`BackendOptions`](../type-aliases/BackendOptions.md) | + +#### Returns + +[`BarretenbergBackend`](BarretenbergBackend.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`destroy`](../interfaces/Backend.md#destroy) + +#### Description + +Destroys the backend + +*** + +### generateFinalProof() + +```ts +generateFinalProof(decompressedWitness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `decompressedWitness` | `Uint8Array` | + +#### Returns + +`Promise`\<[`ProofData`](../type-aliases/ProofData.md)\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`generateFinalProof`](../interfaces/Backend.md#generatefinalproof) + +#### Description + +Generates a final proof (not meant to be verified in another circuit) + +*** + +### generateIntermediateProof() + +```ts +generateIntermediateProof(witness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `witness` | `Uint8Array` | + +#### Returns + +`Promise`\<[`ProofData`](../type-aliases/ProofData.md)\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`generateIntermediateProof`](../interfaces/Backend.md#generateintermediateproof) + +#### Example + +```typescript +const intermediateProof = await backend.generateIntermediateProof(witness); +``` + +*** + +### generateIntermediateProofArtifacts() + +```ts +generateIntermediateProofArtifacts(proofData, numOfPublicInputs): Promise +``` + +#### Parameters + +| Parameter | Type | Default value | +| :------ | :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | `undefined` | +| `numOfPublicInputs` | `number` | `0` | + +#### Returns + +`Promise`\<`object`\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`generateIntermediateProofArtifacts`](../interfaces/Backend.md#generateintermediateproofartifacts) + +#### Example + +```typescript +const artifacts = await backend.generateIntermediateProofArtifacts(proof, numOfPublicInputs); +``` + +*** + +### verifyFinalProof() + +```ts +verifyFinalProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | + +#### Returns + +`Promise`\<`boolean`\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`verifyFinalProof`](../interfaces/Backend.md#verifyfinalproof) + +#### Description + +Verifies a final proof + +*** + +### verifyIntermediateProof() + +```ts +verifyIntermediateProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | 
+| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | + +#### Returns + +`Promise`\<`boolean`\> + +#### Implementation of + +[`Backend`](../interfaces/Backend.md).[`verifyIntermediateProof`](../interfaces/Backend.md#verifyintermediateproof) + +#### Example + +```typescript +const isValidIntermediate = await backend.verifyIntermediateProof(proof); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/index.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/index.md new file mode 100644 index 00000000000..bfbecb52864 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/index.md @@ -0,0 +1,45 @@ +# backend_barretenberg + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [BarretenbergBackend](classes/BarretenbergBackend.md) | - | + +### Interfaces + +| Interface | Description | +| :------ | :------ | +| [Backend](interfaces/Backend.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [BackendOptions](type-aliases/BackendOptions.md) | - | +| [CompiledCircuit](type-aliases/CompiledCircuit.md) | - | +| [ProofData](type-aliases/ProofData.md) | - | + +## Functions + +### flattenPublicInputs() + +```ts +flattenPublicInputs(publicInputs): string[] +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `publicInputs` | `WitnessMap` | + +#### Returns + +`string`[] + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/interfaces/Backend.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/interfaces/Backend.md new file mode 100644 index 00000000000..3eb9645c8d2 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/interfaces/Backend.md @@ -0,0 +1,132 @@ +# Backend + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +#### Description + +Destroys the backend + +*** + +### generateFinalProof() + +```ts +generateFinalProof(decompressedWitness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `decompressedWitness` | `Uint8Array` | + +#### Returns + +`Promise`\<[`ProofData`](../type-aliases/ProofData.md)\> + +#### Description + +Generates a final proof (not meant to be verified in another circuit) + +*** + +### generateIntermediateProof() + +```ts +generateIntermediateProof(decompressedWitness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `decompressedWitness` | `Uint8Array` | + +#### Returns + +`Promise`\<[`ProofData`](../type-aliases/ProofData.md)\> + +#### Description + +Generates an intermediate proof (meant to be verified in another circuit) + +*** + +### generateIntermediateProofArtifacts() + +```ts +generateIntermediateProofArtifacts(proofData, numOfPublicInputs): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | +| `numOfPublicInputs` | `number` | + +#### Returns + +`Promise`\<`object`\> + +#### Description + +Retrieves the artifacts from a proof in the Field format + +*** + +### verifyFinalProof() + +```ts +verifyFinalProof(proofData): 
Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Verifies a final proof + +*** + +### verifyIntermediateProof() + +```ts +verifyIntermediateProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Verifies an intermediate proof + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md new file mode 100644 index 00000000000..266ade75d17 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md @@ -0,0 +1,19 @@ +# BackendOptions + +```ts +type BackendOptions: object; +``` + +## Description + +An options object, currently only used to specify the number of threads to use. + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `threads` | `number` | **Description**

Number of threads | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/CompiledCircuit.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/CompiledCircuit.md new file mode 100644 index 00000000000..34e0dd04205 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/CompiledCircuit.md @@ -0,0 +1,20 @@ +# CompiledCircuit + +```ts +type CompiledCircuit: object; +``` + +## Description + +The representation of a compiled circuit + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `abi` | `Abi` | **Description**

ABI representation of the circuit | +| `bytecode` | `string` | **Description**

The bytecode of the circuit | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/ProofData.md b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/ProofData.md new file mode 100644 index 00000000000..3eb360a78f1 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/type-aliases/ProofData.md @@ -0,0 +1,20 @@ +# ProofData + +```ts +type ProofData: object; +``` + +## Description + +The representation of a proof + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `proof` | `Uint8Array` | **Description**

An byte array representing the proof | +| `publicInputs` | `WitnessMap` | **Description**

Public inputs of a proof | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs new file mode 100644 index 00000000000..2aaa55bccf6 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend","label":"BarretenbergBackend"}]},{"type":"category","label":"Interfaces","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/interfaces/Backend","label":"Backend"}]},{"type":"category","label":"Type Aliases","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions","label":"BackendOptions"},{"type":"doc","id":"reference/NoirJS/backend_barretenberg/type-aliases/CompiledCircuit","label":"CompiledCircuit"},{"type":"doc","id":"reference/NoirJS/backend_barretenberg/type-aliases/ProofData","label":"ProofData"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/.nojekyll b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/classes/Noir.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/classes/Noir.md new file mode 100644 index 00000000000..34e20d99684 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/classes/Noir.md @@ -0,0 +1,132 @@ +# Noir + +## Constructors + +### new Noir(circuit, backend) + +```ts +new Noir(circuit, backend?): Noir +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `circuit` | [`CompiledCircuit`](../type-aliases/CompiledCircuit.md) | +| `backend`? | `Backend` | + +#### Returns + +[`Noir`](Noir.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +#### Description + +Destroys the underlying backend instance. + +#### Example + +```typescript +await noir.destroy(); +``` + +*** + +### execute() + +```ts +execute(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | [`InputMap`](../type-aliases/InputMap.md) | +| `foreignCallHandler`? | [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<`object`\> + +#### Description + +Allows to execute a circuit to get its witness and return value. + +#### Example + +```typescript +async execute(inputs) +``` + +*** + +### generateFinalProof() + +```ts +generateFinalProof(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | [`InputMap`](../type-aliases/InputMap.md) | +| `foreignCallHandler`? 
| [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<[`ProofData`](../type-aliases/ProofData.md)\> + +#### Description + +Generates a witness and a proof given an object as input. + +#### Example + +```typescript +async generateFinalProof(input) +``` + +*** + +### verifyFinalProof() + +```ts +verifyFinalProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | [`ProofData`](../type-aliases/ProofData.md) | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Instantiates the verification key and verifies a proof. + +#### Example + +```typescript +async verifyFinalProof(proof) +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/and.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/and.md new file mode 100644 index 00000000000..c783283e396 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/and.md @@ -0,0 +1,22 @@ +# and() + +```ts +and(lhs, rhs): string +``` + +Performs a bitwise AND operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/blake2s256.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/blake2s256.md new file mode 100644 index 00000000000..7882d0da8d5 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/blake2s256.md @@ -0,0 +1,21 @@ +# blake2s256() + +```ts +blake2s256(inputs): Uint8Array +``` + +Calculates the Blake2s256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md new file mode 100644 index 00000000000..0ba5783f0d5 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md @@ -0,0 +1,29 @@ +# ecdsa\_secp256k1\_verify() + +```ts +ecdsa_secp256k1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Calculates the Blake2s256 hash of the input bytes and represents these as a single field element. +Verifies a ECDSA signature over the secp256k1 curve. 
+ +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md new file mode 100644 index 00000000000..0b20ff68957 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md @@ -0,0 +1,28 @@ +# ecdsa\_secp256r1\_verify() + +```ts +ecdsa_secp256r1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Verifies a ECDSA signature over the secp256r1 curve. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/keccak256.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/keccak256.md new file mode 100644 index 00000000000..d10f155ce86 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/keccak256.md @@ -0,0 +1,21 @@ +# keccak256() + +```ts +keccak256(inputs): Uint8Array +``` + +Calculates the Keccak256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/sha256.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/sha256.md new file mode 100644 index 00000000000..6ba4ecac022 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/sha256.md @@ -0,0 +1,21 @@ +# sha256() + +```ts +sha256(inputs): Uint8Array +``` + +Calculates the SHA256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/xor.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/xor.md new file mode 100644 index 00000000000..8d762b895d3 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/functions/xor.md @@ -0,0 +1,22 @@ +# xor() + +```ts +xor(lhs, rhs): string +``` + +Performs a bitwise XOR operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated using 
[typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/index.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/index.md new file mode 100644 index 00000000000..8b9e35bc9a1 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/index.md @@ -0,0 +1,37 @@ +# noir_js + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [Noir](classes/Noir.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [CompiledCircuit](type-aliases/CompiledCircuit.md) | - | +| [ForeignCallHandler](type-aliases/ForeignCallHandler.md) | A callback which performs an foreign call and returns the response. | +| [ForeignCallInput](type-aliases/ForeignCallInput.md) | - | +| [ForeignCallOutput](type-aliases/ForeignCallOutput.md) | - | +| [InputMap](type-aliases/InputMap.md) | - | +| [ProofData](type-aliases/ProofData.md) | - | +| [WitnessMap](type-aliases/WitnessMap.md) | - | + +### Functions + +| Function | Description | +| :------ | :------ | +| [and](functions/and.md) | Performs a bitwise AND operation between `lhs` and `rhs` | +| [blake2s256](functions/blake2s256.md) | Calculates the Blake2s256 hash of the input bytes | +| [ecdsa\_secp256k1\_verify](functions/ecdsa_secp256k1_verify.md) | Calculates the Blake2s256 hash of the input bytes and represents these as a single field element. | +| [ecdsa\_secp256r1\_verify](functions/ecdsa_secp256r1_verify.md) | Verifies a ECDSA signature over the secp256r1 curve. | +| [keccak256](functions/keccak256.md) | Calculates the Keccak256 hash of the input bytes | +| [sha256](functions/sha256.md) | Calculates the SHA256 hash of the input bytes | +| [xor](functions/xor.md) | Performs a bitwise XOR operation between `lhs` and `rhs` | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/CompiledCircuit.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/CompiledCircuit.md new file mode 100644 index 00000000000..34e0dd04205 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/CompiledCircuit.md @@ -0,0 +1,20 @@ +# CompiledCircuit + +```ts +type CompiledCircuit: object; +``` + +## Description + +The representation of a compiled circuit + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `abi` | `Abi` | **Description**

ABI representation of the circuit | +| `bytecode` | `string` | **Description**

The bytecode of the circuit | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md new file mode 100644 index 00000000000..812b8b16481 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md @@ -0,0 +1,24 @@ +# ForeignCallHandler + +```ts +type ForeignCallHandler: (name, inputs) => Promise; +``` + +A callback which performs an foreign call and returns the response. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `name` | `string` | The identifier for the type of foreign call being performed. | +| `inputs` | [`ForeignCallInput`](ForeignCallInput.md)[] | An array of hex encoded inputs to the foreign call. | + +## Returns + +`Promise`\<[`ForeignCallOutput`](ForeignCallOutput.md)[]\> + +outputs - An array of hex encoded outputs containing the results of the foreign call. + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md new file mode 100644 index 00000000000..dd95809186a --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md @@ -0,0 +1,9 @@ +# ForeignCallInput + +```ts +type ForeignCallInput: string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md new file mode 100644 index 00000000000..b71fb78a946 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md @@ -0,0 +1,9 @@ +# ForeignCallOutput + +```ts +type ForeignCallOutput: string | string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/InputMap.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/InputMap.md new file mode 100644 index 00000000000..c714e999d93 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/InputMap.md @@ -0,0 +1,13 @@ +# InputMap + +```ts +type InputMap: object; +``` + +## Index signature + + \[`key`: `string`\]: `InputValue` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ProofData.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ProofData.md new file mode 100644 index 00000000000..3eb360a78f1 --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/ProofData.md @@ -0,0 +1,20 @@ +# ProofData + +```ts +type ProofData: object; +``` + +## Description + +The representation of a proof + 
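Before the member table below, here is a hedged sketch of how this type usually flows through NoirJS. It assumes the `BarretenbergBackend`/`Noir` wiring and the `{ x, y }` circuit inputs used in the tutorial later in this changeset, plus a compiled artifact at `./target/circuit.json`; only the method and member names come from these reference pages:

```ts
import { BarretenbergBackend } from '@noir-lang/backend_barretenberg';
import { Noir, type ProofData } from '@noir-lang/noir_js';
// Assumed artifact path; any compiled circuit JSON with matching inputs works here.
import circuit from './target/circuit.json';

async function proveAndVerify(): Promise<void> {
  const backend = new BarretenbergBackend(circuit);
  const noir = new Noir(circuit, backend);

  // generateFinalProof returns a ProofData: the proof bytes plus the public inputs.
  const proofData: ProofData = await noir.generateFinalProof({ x: 1, y: 2 });
  console.log(`proof is ${proofData.proof.length} bytes`);

  // verifyFinalProof consumes the same object, so the public inputs travel with the proof.
  const verified = await noir.verifyFinalProof(proofData);
  console.log('verified:', verified);

  await noir.destroy();
}

proveAndVerify();
```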
+## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `proof` | `Uint8Array` | **Description**

A byte array representing the proof | +| `publicInputs` | `WitnessMap` | **Description**

Public inputs of a proof | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/WitnessMap.md b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/WitnessMap.md new file mode 100644 index 00000000000..258c46f9d0c --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/type-aliases/WitnessMap.md @@ -0,0 +1,9 @@ +# WitnessMap + +```ts +type WitnessMap: Map; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v../reference/NoirJS/noir_js/typedoc-sidebar.cjs b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/typedoc-sidebar.cjs new file mode 100644 index 00000000000..fe2629ddc9f --- /dev/null +++ b/docs/versioned_docs/version-v../reference/NoirJS/noir_js/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/noir_js/classes/Noir","label":"Noir"}]},{"type":"category","label":"Type Aliases","items":[{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/CompiledCircuit","label":"CompiledCircuit"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallHandler","label":"ForeignCallHandler"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallInput","label":"ForeignCallInput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallOutput","label":"ForeignCallOutput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/InputMap","label":"InputMap"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ProofData","label":"ProofData"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/WitnessMap","label":"WitnessMap"}]},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_js/functions/and","label":"and"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/blake2s256","label":"blake2s256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify","label":"ecdsa_secp256k1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify","label":"ecdsa_secp256r1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/keccak256","label":"keccak256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/sha256","label":"sha256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/xor","label":"xor"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v../reference/_category_.json b/docs/versioned_docs/version-v../reference/_category_.json new file mode 100644 index 00000000000..5b6a20a609a --- /dev/null +++ b/docs/versioned_docs/version-v../reference/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v../reference/nargo_commands.md b/docs/versioned_docs/version-v../reference/nargo_commands.md new file mode 100644 index 00000000000..ff3dee8973f --- /dev/null +++ b/docs/versioned_docs/version-v../reference/nargo_commands.md @@ -0,0 +1,250 @@ +--- +title: Nargo +description: + Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, + 
generate Solidity verifier smart contract and compile into JSON file containing ACIR + representation and ABI of circuit. +keywords: + [ + Nargo, + Noir CLI, + Noir Prover, + Noir Verifier, + generate Solidity verifier, + compile JSON file, + ACIR representation, + ABI of circuit, + TypeScript, + ] +sidebar_position: 0 +--- + +## General options + +| Option | Description | +| -------------------- | -------------------------------------------------- | +| `--show-ssa` | Emit debug information for the intermediate SSA IR | +| `--deny-warnings` | Quit execution when warnings are emitted | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo help [subcommand]` + +Prints the list of available commands or specific information of a subcommand. + +_Arguments_ + +| Argument | Description | +| -------------- | -------------------------------------------- | +| `` | The subcommand whose help message to display | + +## `nargo backend` + +Installs and selects custom backends used to generate and verify proofs. + +### Commands + +| Command | Description | +| ----------- | --------------------------------------------------------- | +| `current` | Prints the name of the currently active backend | +| `ls` | Prints the list of currently installed backends | +| `use` | Select the backend to use | +| `install` | Install a new backend from a URL | +| `uninstall` | Uninstalls a backend | +| `help` | Print this message or the help of the given subcommand(s) | + +### Options + +| Option | Description | +| ------------ | ----------- | +| `-h, --help` | Print help | + +## `nargo check` + +Generate the `Prover.toml` and `Verifier.toml` files for specifying prover and verifier in/output +values of the Noir program respectively. + +### Options + +| Option | Description | +| --------------------- | ------------------------------------- | +| `--package ` | The name of the package to check | +| `--workspace` | Check all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +### `nargo codegen-verifier` + +Generate a Solidity verifier smart contract for the program. + +### Options + +| Option | Description | +| --------------------- | ------------------------------------- | +| `--package ` | The name of the package to codegen | +| `--workspace` | Codegen all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo compile` + +Compile the program into a JSON build artifact file containing the ACIR representation and the ABI +of the circuit. This build artifact can then be used to generate and verify proofs. + +You can also use "build" as an alias for compile (e.g. `nargo build`). + +### Options + +| Option | Description | +| --------------------- | ------------------------------------------------------------ | +| `--package ` | The name of the package to compile | +| `--workspace` | Compile all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo new ` + +Creates a new Noir project in a new folder. 
+ +**Arguments** + +| Argument | Description | +| -------- | -------------------------------- | +| `` | The path to save the new project | + +### Options + +| Option | Description | +| --------------- | ----------------------------------------------------- | +| `--name ` | Name of the package [default: package directory name] | +| `--lib` | Use a library template | +| `--bin` | Use a binary template [default] | +| `--contract` | Use a contract template | +| `-h, --help` | Print help | + +## `nargo init` + +Creates a new Noir project in the current directory. + +### Options + +| Option | Description | +| --------------- | ----------------------------------------------------- | +| `--name ` | Name of the package [default: current directory name] | +| `--lib` | Use a library template | +| `--bin` | Use a binary template [default] | +| `--contract` | Use a contract template | +| `-h, --help` | Print help | + +## `nargo execute [WITNESS_NAME]` + +Runs the Noir program and prints its return value. + +**Arguments** + +| Argument | Description | +| ---------------- | ----------------------------------------- | +| `[WITNESS_NAME]` | Write the execution witness to named file | + +### Options + +| Option | Description | +| --------------------------------- | ------------------------------------------------------------------------------------ | +| `-p, --prover-name ` | The name of the toml file which contains the inputs for the prover [default: Prover] | +| `--package ` | The name of the package to execute | +| `--workspace` | Execute all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +_Usage_ + +The inputs to the circuit are read from the `Prover.toml` file generated by `nargo check`, which +must be filled in. + +To save the witness to file, run the command with a value for the `WITNESS_NAME` argument. A +`.tr` file will then be saved in the `./target` folder. + +## `nargo prove` + +Creates a proof for the program. + +### Options + +| Option | Description | +| ------------------------------------- | ---------------------------------------------------------------------------------------- | +| `-p, --prover-name ` | The name of the toml file which contains the inputs for the prover [default: Prover] | +| `-v, --verifier-name ` | The name of the toml file which contains the inputs for the verifier [default: Verifier] | +| `--verify` | Verify proof after proving | +| `--package ` | The name of the package to prove | +| `--workspace` | Prove all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo verify` + +Given a proof and a program, verify whether the proof is valid. 
+ +### Options + +| Option | Description | +| ------------------------------------- | ---------------------------------------------------------------------------------------- | +| `-v, --verifier-name ` | The name of the toml file which contains the inputs for the verifier [default: Verifier] | +| `--package ` | The name of the package to verify | +| `--workspace` | Verify all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo test [TEST_NAME]` + +Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if +you run `nargo test`. To print `println` statements in tests, use the `--show-output` flag. + +Takes an optional `--exact` flag which allows you to select tests based on an exact name. + +See an example on the [testing page](../getting_started/tooling/testing.md). + +### Options + +| Option | Description | +| --------------------- | -------------------------------------- | +| `--show-output` | Display output of `println` statements | +| `--exact` | Only run tests that match exactly | +| `--package ` | The name of the package to test | +| `--workspace` | Test all packages in the workspace | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +## `nargo info` + +Prints a table containing the information of the package. + +Currently the table provide + +1. The number of ACIR opcodes +2. The final number gates in the circuit used by a backend + +If the file contains a contract the table will provide the +above information about each function of the contract. + +## `nargo lsp` + +Start a long-running Language Server process that communicates over stdin/stdout. +Usually this command is not run by a user, but instead will be run by a Language Client, such as [vscode-noir](https://github.com/noir-lang/vscode-noir). + +## `nargo fmt` + +Automatically formats your Noir source code based on the default formatting settings. diff --git a/docs/versioned_docs/version-v../tutorials/noirjs_app.md b/docs/versioned_docs/version-v../tutorials/noirjs_app.md new file mode 100644 index 00000000000..302ee4aeade --- /dev/null +++ b/docs/versioned_docs/version-v../tutorials/noirjs_app.md @@ -0,0 +1,261 @@ +--- +title: Tiny NoirJS app +description: Learn how to setup a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a typescript or javascript environment +keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs] +sidebar_position: 0 +--- + +NoirJS works both on the browser and on the server, and works for both ESM and CJS module systems. In this page, we will learn how can we write a simple test and a simple web app to verify the standard Noir example. + +You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). + +## Before we start + +:::note + +Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.18.x matches `noir_js@0.18.x`, etc. + +In this guide, we will be pinned to 0.17.0. + +::: + +Make sure you have Node installed on your machine by opening a terminal and executing `node --version`. 
If you don't see a version, you should install [node](https://github.com/nvm-sh/nvm). You can also use `yarn` if you prefer that package manager over npm (which comes with node).
+
+First of all, follow the [Nargo guide](../getting_started/installation/index.md) to install nargo version 0.17.0 and create a new project with `nargo new circuit`. Once there, `cd` into the `circuit` folder. You should then be able to compile your circuit into `json` format and see it inside the `target` folder:
+
+```bash
+nargo compile
+```
+
+Your folder structure should look like:
+
+```tree
+.
+└── circuit
+    ├── Nargo.toml
+    ├── src
+    │   └── main.nr
+    └── target
+        └── circuit.json
+```
+
+## Starting a new project
+
+Go back to the previous folder and start a new project by running `npm init`. You can configure your project or just leave the defaults, and see a `package.json` appear in your root folder.
+
+## Installing dependencies
+
+We'll need two `npm` packages. These packages will provide us with the methods we need to run and verify proofs:
+
+```bash
+npm i @noir-lang/backend_barretenberg@^0.17.0 @noir-lang/noir_js@^0.17.0
+```
+
+To serve our page, we can use a build tool such as `vite`. Because we're gonna use some `wasm` files, we need to install a plugin as well. Run:
+
+```bash
+npm i --save-dev vite rollup-plugin-copy
+```
+
+While we're in dependency land, we may as well define a nice starting script. Vite makes it easy. Just open `package.json`, find the "scripts" block and add this just below the line with `"test" : "echo......."`:
+
+```json
+  "start": "vite --open"
+```
+
+If you want to build a static website, you can also add some build and preview scripts:
+
+```json
+  "build": "vite build",
+  "preview": "vite preview"
+```
+
+## Vite plugins
+
+Vite is great, but support for `wasm` doesn't work out-of-the-box. We're gonna write a quick plugin and use another one. Just copy and paste this into a file named `vite.config.js`. You don't need to understand it, just trust me bro.
+
+```js
+import { defineConfig } from 'vite';
+import copy from 'rollup-plugin-copy';
+import fs from 'fs';
+import path from 'path';
+
+const wasmContentTypePlugin = {
+  name: 'wasm-content-type-plugin',
+  configureServer(server) {
+    server.middlewares.use(async (req, res, next) => {
+      if (req.url.endsWith('.wasm')) {
+        res.setHeader('Content-Type', 'application/wasm');
+        const newPath = req.url.replace('deps', 'dist');
+        const targetPath = path.join(__dirname, newPath);
+        const wasmContent = fs.readFileSync(targetPath);
+        return res.end(wasmContent);
+      }
+      next();
+    });
+  },
+};
+
+export default defineConfig(({ command }) => {
+  if (command === 'serve') {
+    return {
+      plugins: [
+        copy({
+          targets: [{ src: 'node_modules/**/*.wasm', dest: 'node_modules/.vite/dist' }],
+          copySync: true,
+          hook: 'buildStart',
+        }),
+        command === 'serve' ? wasmContentTypePlugin : [],
+      ],
+    };
+  }
+
+  return {};
+});
+```
+
+## HTML
+
+Here's the simplest HTML with some terrible UI. Create a file called `index.html` and paste this:
+
+```html

+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Noir app</title>
+  </head>
+  <body>
+    <!-- Minimal markup: app.js (loaded below as a module) writes into the
+         elements with ids "logs" and "results". -->
+    <script type="module" src="/app.js"></script>
+    <h1>Very basic Noir app</h1>
+    <div id="logs"><h2>Logs</h2></div>
+    <div id="results"><h2>Proof</h2></div>
+  </body>
+</html>
+ + +``` + +## Some good old vanilla Javascript + +Create a new file `app.js`, which is where our javascript code will live. Let's start with this code inside: + +```js +document.addEventListener('DOMContentLoaded', async () => { + // here's where the magic happens +}); + +function display(container, msg) { + const c = document.getElementById(container); + const p = document.createElement('p'); + p.textContent = msg; + c.appendChild(p); +} +``` + +We can manipulate our website with this little function, so we can see our website working. + +## Adding Noir + +If you come from the previous page, your folder structure should look like this: + +```tree +├── app.js +├── circuit +│ ├── Nargo.toml +│ ├── src +│ │ └── main.nr +│ └── target +│ └── circuit.json +├── index.html +├── package.json +└── vite.config.js +``` + +You'll see other files and folders showing up (like `package-lock.json`, `yarn.lock`, `node_modules`) but you shouldn't have to care about those. + +## Importing our dependencies + +We're starting with the good stuff now. At the top of the new javascript file, import the packages: + +```ts +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; +import { Noir } from '@noir-lang/noir_js'; +``` + +We also need to import the `circuit` JSON file we created. If you have the suggested folder structure, you can add this line: + +```ts +import circuit from './circuit/target/circuit.json'; +``` + +## Write code + +:::note + +We're gonna be adding code inside the `document.addEventListener...etc` block: + +```js +// forget stuff here +document.addEventListener('DOMContentLoaded', async () => { + // here's where the magic happens +}); +// forget stuff here +``` + +::: + +Our dependencies exported two classes: `BarretenbergBackend` and `Noir`. Let's `init` them and add some logs, just to flex: + +```ts +const backend = new BarretenbergBackend(circuit); +const noir = new Noir(circuit, backend); +``` + +## Proving + +Now we're ready to prove stuff! Let's feed some inputs to our circuit and calculate the proof: + +```js +const input = { x: 1, y: 2 }; +display('logs', 'Generating proof... ⌛'); +const proof = await noir.generateFinalProof(input); +display('logs', 'Generating proof... ✅'); +display('results', proof.proof); +``` + +You're probably eager to see stuff happening, so go and run your app now! + +From your terminal, run `npm start` (or `yarn start`). If it doesn't open a browser for you, just visit `localhost:5173`. On a modern laptop, proof will generate in less than 100ms, and you'll see this: + +![Getting Started 0](@site/static/img/noir_getting_started_1.png) + +If you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human. + +In any case, this means your proof was generated! But you shouldn't trust me just yet. Add these lines to see it being verified: + +```js +display('logs', 'Verifying proof... ⌛'); +const verification = await noir.verifyFinalProof(proof); +if (verification) display('logs', 'Verifying proof... ✅'); +``` + +By saving, your app will refresh and here's our complete Tiny Noir App! + +You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). + +## Further Reading + +You can see how noirjs is used in a full stack Next.js hardhat application in the [noir-starter repo here](https://github.com/noir-lang/noir-starter/tree/main/next-hardhat). 
The example shows how to calculate a proof in the browser and verify it with a deployed Solidity verifier contract from noirjs. + +You should also check out the more advanced examples in the [noir-examples repo](https://github.com/noir-lang/noir-examples), where you'll find reference usage for some cool apps. diff --git a/docs/versioned_sidebars/version-v..-sidebars.json b/docs/versioned_sidebars/version-v..-sidebars.json new file mode 100644 index 00000000000..b16f79cc176 --- /dev/null +++ b/docs/versioned_sidebars/version-v..-sidebars.json @@ -0,0 +1,83 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "index" + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting_started" + } + ] + }, + { + "type": "category", + "label": "The Noir Language", + "items": [ + { + "type": "autogenerated", + "dirName": "noir" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "category", + "label": "How To Guides", + "items": [ + { + "type": "autogenerated", + "dirName": "how_to" + } + ] + }, + { + "type": "category", + "label": "Explainers", + "items": [ + { + "type": "autogenerated", + "dirName": "explainers" + } + ] + }, + { + "type": "category", + "label": "Tutorials", + "items": [ + { + "type": "autogenerated", + "dirName": "tutorials" + } + ] + }, + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "reference" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "doc", + "id": "migration_notes", + "label": "Migration notes" + } + ] +} diff --git a/docs/versions.json b/docs/versions.json deleted file mode 100644 index 7e140c94b73..00000000000 --- a/docs/versions.json +++ /dev/null @@ -1,4 +0,0 @@ -[ - "v0.19.4", - "v0.17.0" -] diff --git a/flake.nix b/flake.nix index 0ec712aa082..6a146becbb8 100644 --- a/flake.nix +++ b/flake.nix @@ -73,7 +73,7 @@ # Configuration shared between builds config = { # x-release-please-start-version - version = "0.20.0"; + version = "0.22.0"; # x-release-please-end src = pkgs.lib.cleanSourceWith { diff --git a/noir_stdlib/src/hash.nr b/noir_stdlib/src/hash.nr index 157d6518367..ad7e4f2e28f 100644 --- a/noir_stdlib/src/hash.nr +++ b/noir_stdlib/src/hash.nr @@ -16,7 +16,7 @@ pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint { pedersen_commitment_with_separator(input, 0) } -#[foreign(pedersen)] +#[foreign(pedersen_commitment)] pub fn __pedersen_commitment_with_separator(_input: [Field; N], _separator: u32) -> [Field; 2] {} pub fn pedersen_commitment_with_separator(input: [Field; N], separator: u32) -> PedersenPoint { diff --git a/release-please-config.json b/release-please-config.json index 562de471f0b..e73993ca974 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -65,7 +65,6 @@ "blackbox_solver/Cargo.toml", "brillig/Cargo.toml", "brillig_vm/Cargo.toml", - "stdlib/Cargo.toml", { "type": "json", "path": "acvm_js/package.json", @@ -81,5 +80,6 @@ }, "plugins": [ "sentence-case" - ] -} + ], + "bootstrap-sha": "690cfc0468de0b9aee53ccfe832c71c16e61e5fc" +} \ No newline at end of file diff --git a/test_programs/execution_success/brillig_set_slice_of_slice/Nargo.toml b/test_programs/compile_success_empty/brillig_set_slice_of_slice/Nargo.toml similarity index 100% rename from test_programs/execution_success/brillig_set_slice_of_slice/Nargo.toml rename to test_programs/compile_success_empty/brillig_set_slice_of_slice/Nargo.toml diff --git a/test_programs/execution_success/brillig_set_slice_of_slice/src/main.nr b/test_programs/compile_success_empty/brillig_set_slice_of_slice/src/main.nr similarity index 100% rename from test_programs/execution_success/brillig_set_slice_of_slice/src/main.nr rename to test_programs/compile_success_empty/brillig_set_slice_of_slice/src/main.nr diff --git a/test_programs/execution_success/regression_3635/Nargo.toml b/test_programs/compile_success_empty/regression_3635/Nargo.toml similarity index 100% rename from test_programs/execution_success/regression_3635/Nargo.toml rename to test_programs/compile_success_empty/regression_3635/Nargo.toml diff --git a/test_programs/execution_success/regression_3635/src/main.nr b/test_programs/compile_success_empty/regression_3635/src/main.nr similarity index 100% rename from test_programs/execution_success/regression_3635/src/main.nr rename to test_programs/compile_success_empty/regression_3635/src/main.nr diff --git a/test_programs/execution_success/regression_3635/Prover.toml b/test_programs/execution_success/regression_3635/Prover.toml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tooling/backend_interface/Cargo.toml b/tooling/backend_interface/Cargo.toml index a9217af65d2..32c5d28e3b0 100644 --- a/tooling/backend_interface/Cargo.toml +++ b/tooling/backend_interface/Cargo.toml @@ -18,7 +18,7 @@ serde_json.workspace = true bb_abstraction_leaks.workspace = true log.workspace = true -tempfile = "3.6.0" +tempfile.workspace = true ## bb binary downloading tar = 
"~0.4.15" diff --git a/tooling/backend_interface/src/cli/info.rs b/tooling/backend_interface/src/cli/info.rs index d3fd89bd2bc..81b811f0e32 100644 --- a/tooling/backend_interface/src/cli/info.rs +++ b/tooling/backend_interface/src/cli/info.rs @@ -1,9 +1,8 @@ -use acvm::Language; +use acvm::ExpressionWidth; use serde::Deserialize; -use std::collections::HashSet; use std::path::{Path, PathBuf}; -use crate::{BackendError, BackendOpcodeSupport}; +use crate::BackendError; use super::string_from_stderr; @@ -14,7 +13,11 @@ pub(crate) struct InfoCommand { #[derive(Deserialize)] struct InfoResponse { language: LanguageResponse, + #[allow(dead_code)] + #[deprecated(note = "This field is deprecated and will be removed in the future")] opcodes_supported: Vec, + #[allow(dead_code)] + #[deprecated(note = "This field is deprecated and will be removed in the future")] black_box_functions_supported: Vec, } @@ -24,20 +27,8 @@ struct LanguageResponse { width: Option, } -impl BackendOpcodeSupport { - fn new(info: InfoResponse) -> Self { - let opcodes: HashSet = info.opcodes_supported.into_iter().collect(); - let black_box_functions: HashSet = - info.black_box_functions_supported.into_iter().collect(); - Self { opcodes, black_box_functions } - } -} - impl InfoCommand { - pub(crate) fn run( - self, - binary_path: &Path, - ) -> Result<(Language, BackendOpcodeSupport), BackendError> { + pub(crate) fn run(self, binary_path: &Path) -> Result { let mut command = std::process::Command::new(binary_path); command.arg("info").arg("-c").arg(self.crs_path).arg("-o").arg("-"); @@ -50,32 +41,27 @@ impl InfoCommand { let backend_info: InfoResponse = serde_json::from_slice(&output.stdout).expect("Backend should return valid json"); - let language: Language = match backend_info.language.name.as_str() { + let expression_width: ExpressionWidth = match backend_info.language.name.as_str() { "PLONK-CSAT" => { let width = backend_info.language.width.unwrap(); - Language::PLONKCSat { width } + ExpressionWidth::Bounded { width } } - "R1CS" => Language::R1CS, - _ => panic!("Unknown langauge"), + "R1CS" => ExpressionWidth::Unbounded, + _ => panic!("Unknown Expression width configuration"), }; - Ok((language, BackendOpcodeSupport::new(backend_info))) + Ok(expression_width) } } #[test] fn info_command() -> Result<(), BackendError> { - use acvm::acir::circuit::opcodes::Opcode; - - use acvm::acir::native_types::Expression; - let backend = crate::get_mock_backend()?; let crs_path = backend.backend_directory(); - let (language, opcode_support) = InfoCommand { crs_path }.run(backend.binary_path())?; + let expression_width = InfoCommand { crs_path }.run(backend.binary_path())?; - assert!(matches!(language, Language::PLONKCSat { width: 3 })); - assert!(opcode_support.is_opcode_supported(&Opcode::Arithmetic(Expression::default()))); + assert!(matches!(expression_width, ExpressionWidth::Bounded { width: 3 })); Ok(()) } diff --git a/tooling/backend_interface/src/lib.rs b/tooling/backend_interface/src/lib.rs index d25319e11d5..8ed164fc217 100644 --- a/tooling/backend_interface/src/lib.rs +++ b/tooling/backend_interface/src/lib.rs @@ -1,15 +1,14 @@ #![warn(unused_crate_dependencies, unused_extern_crates)] #![warn(unreachable_pub)] -use std::{collections::HashSet, path::PathBuf}; +use std::path::PathBuf; mod cli; mod download; mod proof_system; mod smart_contract; -use acvm::acir::circuit::Opcode; -use bb_abstraction_leaks::ACVM_BACKEND_BARRETENBERG; +pub use bb_abstraction_leaks::ACVM_BACKEND_BARRETENBERG; use bb_abstraction_leaks::BB_VERSION; use 
cli::VersionCommand; pub use download::download_backend; @@ -135,54 +134,6 @@ impl Backend { } } -pub struct BackendOpcodeSupport { - opcodes: HashSet, - black_box_functions: HashSet, -} - -impl BackendOpcodeSupport { - pub fn is_opcode_supported(&self, opcode: &Opcode) -> bool { - match opcode { - Opcode::Arithmetic(_) => self.opcodes.contains("arithmetic"), - Opcode::Directive(_) => self.opcodes.contains("directive"), - Opcode::Brillig(_) => self.opcodes.contains("brillig"), - Opcode::MemoryInit { .. } => self.opcodes.contains("memory_init"), - Opcode::MemoryOp { .. } => self.opcodes.contains("memory_op"), - Opcode::BlackBoxFuncCall(func) => { - self.black_box_functions.contains(func.get_black_box_func().name()) - } - } - } - - pub fn all() -> BackendOpcodeSupport { - BackendOpcodeSupport { - opcodes: HashSet::from([ - "arithmetic".to_string(), - "directive".to_string(), - "brillig".to_string(), - "memory_init".to_string(), - "memory_op".to_string(), - ]), - black_box_functions: HashSet::from([ - "sha256".to_string(), - "schnorr_verify".to_string(), - "blake2s".to_string(), - "pedersen".to_string(), - "pedersen_hash".to_string(), - "hash_to_field_128_security".to_string(), - "ecdsa_secp256k1".to_string(), - "fixed_base_scalar_mul".to_string(), - "and".to_string(), - "xor".to_string(), - "range".to_string(), - "keccak256".to_string(), - "recursive_aggregation".to_string(), - "ecdsa_secp256r1".to_string(), - ]), - } - } -} - #[cfg(test)] mod backend { use crate::{Backend, BackendError}; diff --git a/tooling/backend_interface/src/proof_system.rs b/tooling/backend_interface/src/proof_system.rs index bb47603bbf7..01842a81da9 100644 --- a/tooling/backend_interface/src/proof_system.rs +++ b/tooling/backend_interface/src/proof_system.rs @@ -3,15 +3,15 @@ use std::io::Write; use std::path::Path; use acvm::acir::{circuit::Circuit, native_types::WitnessMap}; +use acvm::ExpressionWidth; use acvm::FieldElement; -use acvm::Language; use tempfile::tempdir; use crate::cli::{ GatesCommand, InfoCommand, ProofAsFieldsCommand, ProveCommand, VerifyCommand, VkAsFieldsCommand, WriteVkCommand, }; -use crate::{Backend, BackendError, BackendOpcodeSupport}; +use crate::{Backend, BackendError}; impl Backend { pub fn get_exact_circuit_size(&self, circuit: &Circuit) -> Result { @@ -30,21 +30,22 @@ impl Backend { .run(binary_path) } - pub fn get_backend_info(&self) -> Result<(Language, BackendOpcodeSupport), BackendError> { + pub fn get_backend_info(&self) -> Result { let binary_path = self.assert_binary_exists()?; self.assert_correct_version()?; InfoCommand { crs_path: self.crs_directory() }.run(binary_path) } - /// If we cannot get a valid backend, returns the default backend which supports all the opcodes - /// and uses Plonk with width 3 + /// If we cannot get a valid backend, returns `ExpressionWidth::Bound { width: 3 }`` /// The function also prints a message saying we could not find a backend - pub fn get_backend_info_or_default(&self) -> (Language, BackendOpcodeSupport) { - if let Ok(backend_info) = self.get_backend_info() { - (backend_info.0, backend_info.1) + pub fn get_backend_info_or_default(&self) -> ExpressionWidth { + if let Ok(expression_width) = self.get_backend_info() { + expression_width } else { - log::warn!("No valid backend found, defaulting to Plonk with width 3 and all opcodes supported"); - (Language::PLONKCSat { width: 3 }, BackendOpcodeSupport::all()) + log::warn!( + "No valid backend found, ExpressionWidth defaulting to Bounded with a width of 3" + ); + ExpressionWidth::Bounded { width: 3 } 
} } diff --git a/tooling/lsp/src/requests/profile_run.rs b/tooling/lsp/src/requests/profile_run.rs index 84888d30ba5..4c4d7f11fde 100644 --- a/tooling/lsp/src/requests/profile_run.rs +++ b/tooling/lsp/src/requests/profile_run.rs @@ -3,7 +3,7 @@ use std::{ future::{self, Future}, }; -use acvm::{acir::circuit::Opcode, Language}; +use acvm::ExpressionWidth; use async_lsp::{ErrorCode, ResponseError}; use nargo::artifacts::debug::DebugArtifact; use nargo_toml::{find_package_manifest, resolve_workspace_from_toml, PackageSelection}; @@ -57,16 +57,13 @@ fn on_profile_run_request_inner( .cloned() .partition(|package| package.is_binary()); - // # TODO(#3504): Consider how to incorporate Backend relevant information in wider context. - let is_opcode_supported = |_opcode: &Opcode| true; - let np_language = Language::PLONKCSat { width: 3 }; + let expression_width = ExpressionWidth::Bounded { width: 3 }; let (compiled_programs, compiled_contracts) = nargo::ops::compile_workspace( &workspace, &binary_packages, &contract_packages, - np_language, - is_opcode_supported, + expression_width, &CompileOptions::default(), ) .map_err(|err| ResponseError::new(ErrorCode::REQUEST_FAILED, err))?; diff --git a/tooling/nargo/Cargo.toml b/tooling/nargo/Cargo.toml index 48741c367a5..f0733d7ad44 100644 --- a/tooling/nargo/Cargo.toml +++ b/tooling/nargo/Cargo.toml @@ -24,9 +24,10 @@ iter-extended.workspace = true serde.workspace = true thiserror.workspace = true codespan-reporting.workspace = true +log.workspace = true rayon = "1.8.0" [dev-dependencies] # TODO: This dependency is used to generate unit tests for `get_all_paths_in_dir` # TODO: once that method is moved to nargo_cli, we can move this dependency to nargo_cli -tempfile = "3.2.0" \ No newline at end of file +tempfile.workspace = true \ No newline at end of file diff --git a/tooling/nargo/src/artifacts/contract.rs b/tooling/nargo/src/artifacts/contract.rs index f9e8d45b02e..4ade4f5660e 100644 --- a/tooling/nargo/src/artifacts/contract.rs +++ b/tooling/nargo/src/artifacts/contract.rs @@ -14,8 +14,6 @@ pub struct PreprocessedContract { pub noir_version: String, /// The name of the contract. pub name: String, - /// The identifier of the proving backend which this contract has been compiled for. - pub backend: String, /// Each of the contract's functions are compiled into a separate program stored in this `Vec`. pub functions: Vec, /// All the events defined inside the contract scope. diff --git a/tooling/nargo/src/artifacts/debug.rs b/tooling/nargo/src/artifacts/debug.rs index 40acc7db8f8..324c476d13d 100644 --- a/tooling/nargo/src/artifacts/debug.rs +++ b/tooling/nargo/src/artifacts/debug.rs @@ -34,7 +34,7 @@ impl DebugArtifact { .collect(); for file_id in files_with_debug_symbols { - let file_source = file_manager.fetch_file(file_id).source(); + let file_source = file_manager.fetch_file(file_id); file_map.insert( file_id, diff --git a/tooling/nargo/src/artifacts/program.rs b/tooling/nargo/src/artifacts/program.rs index 890b6c55f7d..664db0adca4 100644 --- a/tooling/nargo/src/artifacts/program.rs +++ b/tooling/nargo/src/artifacts/program.rs @@ -17,7 +17,6 @@ pub struct PreprocessedProgram { /// Used to short-circuit compilation in the case of the source code not changing since the last compilation. 
pub hash: u64, - pub backend: String, pub abi: Abi, #[serde( diff --git a/tooling/nargo/src/errors.rs b/tooling/nargo/src/errors.rs index bca8ca24767..c743768bee2 100644 --- a/tooling/nargo/src/errors.rs +++ b/tooling/nargo/src/errors.rs @@ -47,12 +47,6 @@ pub enum NargoError { ForeignCallError(#[from] ForeignCallError), } -impl From for NargoError { - fn from(_: acvm::compiler::CompileError) -> Self { - NargoError::CompilationError - } -} - impl NargoError { /// Extracts the user defined failure message from the ExecutionError /// If one exists. @@ -69,7 +63,6 @@ impl NargoError { ExecutionError::AssertionFailed(message, _) => Some(message), ExecutionError::SolvingError(error) => match error { OpcodeResolutionError::IndexOutOfBounds { .. } - | OpcodeResolutionError::UnsupportedBlackBoxFunc(_) | OpcodeResolutionError::OpcodeNotSolvable(_) | OpcodeResolutionError::UnsatisfiedConstrain { .. } => None, OpcodeResolutionError::BrilligFunctionFailed { message, .. } => Some(message), diff --git a/tooling/nargo/src/lib.rs b/tooling/nargo/src/lib.rs index 6f3d36febba..f0c7277060f 100644 --- a/tooling/nargo/src/lib.rs +++ b/tooling/nargo/src/lib.rs @@ -19,7 +19,7 @@ use std::collections::BTreeMap; use fm::FileManager; use noirc_driver::{add_dep, prepare_crate, prepare_dependency}; use noirc_frontend::{ - graph::{CrateGraph, CrateId, CrateName}, + graph::{CrateId, CrateName}, hir::Context, }; use package::{Dependency, Package}; @@ -91,8 +91,7 @@ pub fn prepare_package(package: &Package) -> (Context, CrateId) { let mut fm = FileManager::new(&package.root_dir); insert_all_files_for_package_into_file_manager(package, &mut fm); - let graph = CrateGraph::default(); - let mut context = Context::new(fm, graph); + let mut context = Context::new(fm); let crate_id = prepare_crate(&mut context, &package.entry_path); diff --git a/tooling/nargo/src/ops/compile.rs b/tooling/nargo/src/ops/compile.rs index 59ac5672a11..1a9e0a6c115 100644 --- a/tooling/nargo/src/ops/compile.rs +++ b/tooling/nargo/src/ops/compile.rs @@ -1,4 +1,4 @@ -use acvm::{acir::circuit::Opcode, Language}; +use acvm::ExpressionWidth; use fm::FileManager; use noirc_driver::{CompilationResult, CompileOptions, CompiledContract, CompiledProgram}; @@ -17,23 +17,18 @@ pub fn compile_workspace( workspace: &Workspace, binary_packages: &[Package], contract_packages: &[Package], - np_language: Language, - is_opcode_supported: impl Fn(&Opcode) -> bool + std::marker::Sync, + expression_width: ExpressionWidth, compile_options: &CompileOptions, ) -> Result<(Vec, Vec), CompileError> { // Compile all of the packages in parallel. let program_results: Vec<(FileManager, CompilationResult)> = binary_packages .par_iter() - .map(|package| { - compile_program(workspace, package, compile_options, np_language, &is_opcode_supported) - }) + .map(|package| compile_program(workspace, package, compile_options, expression_width)) .collect(); let contract_results: Vec<(FileManager, CompilationResult)> = contract_packages .par_iter() - .map(|package| { - compile_contract(package, compile_options, np_language, &is_opcode_supported) - }) + .map(|package| compile_contract(package, compile_options, expression_width)) .collect(); // Report any warnings/errors which were encountered during compilation. 
@@ -67,8 +62,7 @@ pub fn compile_program( workspace: &Workspace, package: &Package, compile_options: &CompileOptions, - np_language: Language, - is_opcode_supported: &impl Fn(&Opcode) -> bool, + expression_width: ExpressionWidth, ) -> (FileManager, CompilationResult) { let (mut context, crate_id) = prepare_package(package); @@ -85,8 +79,7 @@ pub fn compile_program( }; // Apply backend specific optimizations. - let optimized_program = crate::ops::optimize_program(program, np_language, is_opcode_supported) - .expect("Backend does not support an opcode that is in the IR"); + let optimized_program = crate::ops::optimize_program(program, expression_width); (context.file_manager, Ok((optimized_program, warnings))) } @@ -94,8 +87,7 @@ pub fn compile_program( fn compile_contract( package: &Package, compile_options: &CompileOptions, - np_language: Language, - is_opcode_supported: &impl Fn(&Opcode) -> bool, + expression_width: ExpressionWidth, ) -> (FileManager, CompilationResult) { let (mut context, crate_id) = prepare_package(package); let (contract, warnings) = @@ -106,9 +98,7 @@ fn compile_contract( } }; - let optimized_contract = - crate::ops::optimize_contract(contract, np_language, &is_opcode_supported) - .expect("Backend does not support an opcode that is in the IR"); + let optimized_contract = crate::ops::optimize_contract(contract, expression_width); (context.file_manager, Ok((optimized_contract, warnings))) } diff --git a/tooling/nargo/src/ops/execute.rs b/tooling/nargo/src/ops/execute.rs index d7cb44188c4..2ac85781410 100644 --- a/tooling/nargo/src/ops/execute.rs +++ b/tooling/nargo/src/ops/execute.rs @@ -13,6 +13,7 @@ pub fn execute_circuit( blackbox_solver: &B, foreign_call_executor: &mut F, ) -> Result { + log::trace!("Start circuit execution"); let mut acvm = ACVM::new(blackbox_solver, &circuit.opcodes, initial_witness); loop { @@ -55,5 +56,7 @@ pub fn execute_circuit( } let solved_witness = acvm.finalize(); + + log::trace!("Finish circuit execution"); Ok(solved_witness) } diff --git a/tooling/nargo/src/ops/optimize.rs b/tooling/nargo/src/ops/optimize.rs index 54e2432aa40..d3a36dd65ac 100644 --- a/tooling/nargo/src/ops/optimize.rs +++ b/tooling/nargo/src/ops/optimize.rs @@ -1,34 +1,30 @@ -use acvm::{acir::circuit::Opcode, Language}; -use iter_extended::try_vecmap; +use acvm::ExpressionWidth; +use iter_extended::vecmap; use noirc_driver::{CompiledContract, CompiledProgram}; -use crate::NargoError; - pub fn optimize_program( mut program: CompiledProgram, - np_language: Language, - is_opcode_supported: &impl Fn(&Opcode) -> bool, -) -> Result { + expression_width: ExpressionWidth, +) -> CompiledProgram { let (optimized_circuit, location_map) = - acvm::compiler::compile(program.circuit, np_language, is_opcode_supported)?; + acvm::compiler::compile(program.circuit, expression_width); program.circuit = optimized_circuit; program.debug.update_acir(location_map); - Ok(program) + program } pub fn optimize_contract( contract: CompiledContract, - np_language: Language, - is_opcode_supported: &impl Fn(&Opcode) -> bool, -) -> Result { - let functions = try_vecmap(contract.functions, |mut func| { + expression_width: ExpressionWidth, +) -> CompiledContract { + let functions = vecmap(contract.functions, |mut func| { let (optimized_bytecode, location_map) = - acvm::compiler::compile(func.bytecode, np_language, is_opcode_supported)?; + acvm::compiler::compile(func.bytecode, expression_width); func.bytecode = optimized_bytecode; func.debug.update_acir(location_map); - Ok::<_, NargoError>(func) - })?; + 
func + }); - Ok(CompiledContract { functions, ..contract }) + CompiledContract { functions, ..contract } } diff --git a/tooling/nargo_cli/Cargo.toml b/tooling/nargo_cli/Cargo.toml index 008b1233cd8..2f99fefb778 100644 --- a/tooling/nargo_cli/Cargo.toml +++ b/tooling/nargo_cli/Cargo.toml @@ -62,7 +62,7 @@ tracing-appender = "0.2.3" tokio-util = { version = "0.7.8", features = ["compat"] } [dev-dependencies] -tempfile = "3.6.0" +tempfile.workspace = true dirs.workspace = true assert_cmd = "2.0.8" assert_fs = "1.0.10" diff --git a/tooling/nargo_cli/src/backends.rs b/tooling/nargo_cli/src/backends.rs index 8b1da2cd118..2b3e9d8861f 100644 --- a/tooling/nargo_cli/src/backends.rs +++ b/tooling/nargo_cli/src/backends.rs @@ -7,7 +7,7 @@ fn active_backend_file_path() -> PathBuf { backends_directory().join(".selected_backend") } -pub(crate) const ACVM_BACKEND_BARRETENBERG: &str = "acvm-backend-barretenberg"; +pub(crate) use backend_interface::ACVM_BACKEND_BARRETENBERG; pub(crate) fn clear_active_backend() { let active_backend_file = active_backend_file_path(); diff --git a/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs b/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs index 02c83adb59a..b72ce01e1a9 100644 --- a/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs +++ b/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs @@ -6,8 +6,7 @@ use super::{ use crate::backends::Backend; use crate::errors::CliError; -use acvm::Language; -use backend_interface::BackendOpcodeSupport; +use acvm::ExpressionWidth; use bb_abstraction_leaks::ACVM_BACKEND_BARRETENBERG; use clap::Args; use nargo::package::Package; @@ -46,15 +45,14 @@ pub(crate) fn run( Some(NOIR_ARTIFACT_VERSION_STRING.to_string()), )?; - let (np_language, opcode_support) = backend.get_backend_info()?; + let expression_width = backend.get_backend_info()?; for package in &workspace { let smart_contract_string = smart_contract_for_package( &workspace, backend, package, &args.compile_options, - np_language, - &opcode_support, + expression_width, )?; let contract_dir = workspace.contracts_directory_path(package); @@ -73,11 +71,9 @@ fn smart_contract_for_package( backend: &Backend, package: &Package, compile_options: &CompileOptions, - np_language: Language, - opcode_support: &BackendOpcodeSupport, + expression_width: ExpressionWidth, ) -> Result { - let program = - compile_bin_package(workspace, package, compile_options, np_language, opcode_support)?; + let program = compile_bin_package(workspace, package, compile_options, expression_width)?; let mut smart_contract_string = backend.eth_contract(&program.circuit)?; diff --git a/tooling/nargo_cli/src/cli/compile_cmd.rs b/tooling/nargo_cli/src/cli/compile_cmd.rs index 043c0841958..5ee053c5088 100644 --- a/tooling/nargo_cli/src/cli/compile_cmd.rs +++ b/tooling/nargo_cli/src/cli/compile_cmd.rs @@ -1,8 +1,6 @@ use std::path::Path; -use acvm::acir::circuit::Opcode; -use acvm::Language; -use backend_interface::BackendOpcodeSupport; +use acvm::ExpressionWidth; use fm::FileManager; use iter_extended::vecmap; use nargo::artifacts::contract::PreprocessedContract; @@ -31,9 +29,6 @@ use super::fs::program::{ use super::NargoConfig; use rayon::prelude::*; -// TODO(#1388): pull this from backend. 
-const BACKEND_IDENTIFIER: &str = "acvm-backend-barretenberg"; - /// Compile the program and its secret execution trace into ACIR format #[derive(Debug, Clone, Args)] pub(crate) struct CompileCommand { @@ -72,13 +67,12 @@ pub(crate) fn run( .cloned() .partition(|package| package.is_binary()); - let (np_language, opcode_support) = backend.get_backend_info_or_default(); + let expression_width = backend.get_backend_info_or_default(); let (_, compiled_contracts) = compile_workspace( &workspace, &binary_packages, &contract_packages, - np_language, - &opcode_support, + expression_width, &args.compile_options, )?; @@ -94,25 +88,18 @@ pub(super) fn compile_workspace( workspace: &Workspace, binary_packages: &[Package], contract_packages: &[Package], - np_language: Language, - opcode_support: &BackendOpcodeSupport, + expression_width: ExpressionWidth, compile_options: &CompileOptions, ) -> Result<(Vec, Vec), CliError> { // Compile all of the packages in parallel. let program_results: Vec<(FileManager, CompilationResult)> = binary_packages .par_iter() - .map(|package| { - let is_opcode_supported = |opcode: &_| opcode_support.is_opcode_supported(opcode); - compile_program(workspace, package, compile_options, np_language, &is_opcode_supported) - }) + .map(|package| compile_program(workspace, package, compile_options, expression_width)) .collect(); let contract_results: Vec<(FileManager, CompilationResult)> = contract_packages .par_iter() - .map(|package| { - let is_opcode_supported = |opcode: &_| opcode_support.is_opcode_supported(opcode); - compile_contract(package, compile_options, np_language, &is_opcode_supported) - }) + .map(|package| compile_contract(package, compile_options, expression_width)) .collect(); // Report any warnings/errors which were encountered during compilation. @@ -146,17 +133,14 @@ pub(crate) fn compile_bin_package( workspace: &Workspace, package: &Package, compile_options: &CompileOptions, - np_language: Language, - opcode_support: &BackendOpcodeSupport, + expression_width: ExpressionWidth, ) -> Result { if package.is_library() { return Err(CompileError::LibraryCrate(package.name.clone()).into()); } let (file_manager, compilation_result) = - compile_program(workspace, package, compile_options, np_language, &|opcode| { - opcode_support.is_opcode_supported(opcode) - }); + compile_program(workspace, package, compile_options, expression_width); let program = report_errors( compilation_result, @@ -172,8 +156,7 @@ fn compile_program( workspace: &Workspace, package: &Package, compile_options: &CompileOptions, - np_language: Language, - is_opcode_supported: &impl Fn(&Opcode) -> bool, + expression_width: ExpressionWidth, ) -> (FileManager, CompilationResult) { let (mut context, crate_id) = prepare_package(package); @@ -213,8 +196,7 @@ fn compile_program( }; // Apply backend specific optimizations. 
-    let optimized_program = nargo::ops::optimize_program(program, np_language, is_opcode_supported)
-        .expect("Backend does not support an opcode that is in the IR");
+    let optimized_program = nargo::ops::optimize_program(program, expression_width);
 
     let only_acir = compile_options.only_acir;
     save_program(optimized_program.clone(), package, &workspace.target_directory_path(), only_acir);
@@ -224,8 +206,7 @@ fn compile_contract(
     package: &Package,
     compile_options: &CompileOptions,
-    np_language: Language,
-    is_opcode_supported: &impl Fn(&Opcode) -> bool,
+    expression_width: ExpressionWidth,
 ) -> (FileManager, CompilationResult<CompiledContract>) {
     let (mut context, crate_id) = prepare_package(package);
     let (contract, warnings) =
@@ -236,9 +217,7 @@ fn compile_contract(
         }
     };
 
-    let optimized_contract =
-        nargo::ops::optimize_contract(contract, np_language, &is_opcode_supported)
-            .expect("Backend does not support an opcode that is in the IR");
+    let optimized_contract = nargo::ops::optimize_contract(contract, expression_width);
 
     (context.file_manager, Ok((optimized_contract, warnings)))
 }
@@ -251,7 +230,6 @@ fn save_program(
 ) {
     let preprocessed_program = PreprocessedProgram {
         hash: program.hash,
-        backend: String::from(BACKEND_IDENTIFIER),
         abi: program.abi,
         noir_version: program.noir_version,
         bytecode: program.circuit,
@@ -293,7 +271,6 @@ fn save_contract(contract: CompiledContract, package: &Package, circuit_dir: &Pa
     let preprocessed_contract = PreprocessedContract {
         noir_version: contract.noir_version,
         name: contract.name,
-        backend: String::from(BACKEND_IDENTIFIER),
         functions: preprocessed_functions,
         events: contract.events,
     };
diff --git a/tooling/nargo_cli/src/cli/debug_cmd.rs b/tooling/nargo_cli/src/cli/debug_cmd.rs
index 5204e0f122c..6eab626a08d 100644
--- a/tooling/nargo_cli/src/cli/debug_cmd.rs
+++ b/tooling/nargo_cli/src/cli/debug_cmd.rs
@@ -49,7 +49,7 @@ pub(crate) fn run(
         Some(NOIR_ARTIFACT_VERSION_STRING.to_string()),
     )?;
     let target_dir = &workspace.target_directory_path();
-    let (np_language, opcode_support) = backend.get_backend_info()?;
+    let expression_width = backend.get_backend_info()?;
 
     let Some(package) = workspace.into_iter().find(|p| p.is_binary()) else {
         println!(
@@ -58,13 +58,8 @@ pub(crate) fn run(
         return Ok(());
     };
 
-    let compiled_program = compile_bin_package(
-        &workspace,
-        package,
-        &args.compile_options,
-        np_language,
-        &opcode_support,
-    )?;
+    let compiled_program =
+        compile_bin_package(&workspace, package, &args.compile_options, expression_width)?;
 
     run_async(package, compiled_program, &args.prover_name, &args.witness_name, target_dir)
 }
diff --git a/tooling/nargo_cli/src/cli/execute_cmd.rs b/tooling/nargo_cli/src/cli/execute_cmd.rs
index 2f69b4c7df7..10760f43a45 100644
--- a/tooling/nargo_cli/src/cli/execute_cmd.rs
+++ b/tooling/nargo_cli/src/cli/execute_cmd.rs
@@ -56,15 +56,10 @@ pub(crate) fn run(
     )?;
     let target_dir = &workspace.target_directory_path();
 
-    let (np_language, opcode_support) = backend.get_backend_info_or_default();
+    let expression_width = backend.get_backend_info_or_default();
     for package in &workspace {
-        let compiled_program = compile_bin_package(
-            &workspace,
-            package,
-            &args.compile_options,
-            np_language,
-            &opcode_support,
-        )?;
+        let compiled_program =
+            compile_bin_package(&workspace, package, &args.compile_options, expression_width)?;
 
         let (return_value, solved_witness) =
             execute_program_and_decode(compiled_program, package, &args.prover_name)?;
diff --git a/tooling/nargo_cli/src/cli/fmt_cmd.rs b/tooling/nargo_cli/src/cli/fmt_cmd.rs
index 0c2ca71eba3..e62fc560217 100644
--- a/tooling/nargo_cli/src/cli/fmt_cmd.rs
+++ b/tooling/nargo_cli/src/cli/fmt_cmd.rs
@@ -62,7 +62,7 @@ pub(crate) fn run(args: FormatCommand, config: NargoConfig) -> Result<(), CliErr
             return Ok(());
         }
 
-        let original = file_manager.fetch_file(file_id).source();
+        let original = file_manager.fetch_file(file_id);
         let formatted = nargo_fmt::format(original, parsed_module, &config);
 
         if check_mode {
diff --git a/tooling/nargo_cli/src/cli/info_cmd.rs b/tooling/nargo_cli/src/cli/info_cmd.rs
index b0f771bfc1c..e25051c1df7 100644
--- a/tooling/nargo_cli/src/cli/info_cmd.rs
+++ b/tooling/nargo_cli/src/cli/info_cmd.rs
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 
-use acvm::Language;
+use acvm::ExpressionWidth;
 use backend_interface::BackendError;
 use clap::Args;
 use iter_extended::vecmap;
@@ -67,13 +67,12 @@ pub(crate) fn run(
         .cloned()
         .partition(|package| package.is_binary());
 
-    let (np_language, opcode_support) = backend.get_backend_info_or_default();
+    let expression_width = backend.get_backend_info_or_default();
     let (compiled_programs, compiled_contracts) = compile_workspace(
         &workspace,
         &binary_packages,
         &contract_packages,
-        np_language,
-        &opcode_support,
+        expression_width,
         &args.compile_options,
     )?;
 
@@ -98,13 +97,13 @@ pub(crate) fn run(
         .into_par_iter()
         .zip(compiled_programs)
         .map(|(package, program)| {
-            count_opcodes_and_gates_in_program(backend, program, &package, np_language)
+            count_opcodes_and_gates_in_program(backend, program, &package, expression_width)
         })
         .collect::>()?;
 
     let contract_info = compiled_contracts
         .into_par_iter()
-        .map(|contract| count_opcodes_and_gates_in_contract(backend, contract, np_language))
+        .map(|contract| count_opcodes_and_gates_in_contract(backend, contract, expression_width))
         .collect::>()?;
 
     let info_report = InfoReport { programs: program_info, contracts: contract_info };
@@ -115,7 +114,7 @@ pub(crate) fn run(
     } else {
         // Otherwise print human-readable table.
         if !info_report.programs.is_empty() {
-            let mut program_table = table!([Fm->"Package", Fm->"Language", Fm->"ACIR Opcodes", Fm->"Backend Circuit Size"]);
+            let mut program_table = table!([Fm->"Package", Fm->"Expression Width", Fm->"ACIR Opcodes", Fm->"Backend Circuit Size"]);
 
             for program in info_report.programs {
                 program_table.add_row(program.into());
@@ -126,7 +125,7 @@ pub(crate) fn run(
             let mut contract_table = table!([
                 Fm->"Contract",
                 Fm->"Function",
-                Fm->"Language",
+                Fm->"Expression Width",
                 Fm->"ACIR Opcodes",
                 Fm->"Backend Circuit Size"
             ]);
@@ -203,7 +202,7 @@ struct InfoReport {
 struct ProgramInfo {
     name: String,
     #[serde(skip)]
-    language: Language,
+    expression_width: ExpressionWidth,
     acir_opcodes: usize,
     circuit_size: u32,
 }
@@ -212,7 +211,7 @@ impl From<ProgramInfo> for Row {
     fn from(program_info: ProgramInfo) -> Self {
         row![
             Fm->format!("{}", program_info.name),
-            format!("{:?}", program_info.language),
+            format!("{:?}", program_info.expression_width),
             Fc->format!("{}", program_info.acir_opcodes),
             Fc->format!("{}", program_info.circuit_size),
         ]
@@ -223,7 +222,7 @@ impl From<ProgramInfo> for Row {
 struct ContractInfo {
     name: String,
     #[serde(skip)]
-    language: Language,
+    expression_width: ExpressionWidth,
     functions: Vec,
 }
 
@@ -240,7 +239,7 @@ impl From<ContractInfo> for Vec<Row> {
             row![
                 Fm->format!("{}", contract_info.name),
                 Fc->format!("{}", function.name),
-                format!("{:?}", contract_info.language),
+                format!("{:?}", contract_info.expression_width),
                 Fc->format!("{}", function.acir_opcodes),
                 Fc->format!("{}", function.circuit_size),
             ]
@@ -252,11 +251,11 @@ fn count_opcodes_and_gates_in_program(
     backend: &Backend,
     compiled_program: CompiledProgram,
     package: &Package,
-    language: Language,
+    expression_width: ExpressionWidth,
 ) -> Result {
     Ok(ProgramInfo {
         name: package.name.to_string(),
-        language,
+        expression_width,
         acir_opcodes: compiled_program.circuit.opcodes.len(),
         circuit_size: backend.get_exact_circuit_size(&compiled_program.circuit)?,
     })
@@ -265,7 +264,7 @@ fn count_opcodes_and_gates_in_program(
 fn count_opcodes_and_gates_in_contract(
     backend: &Backend,
     contract: CompiledContract,
-    language: Language,
+    expression_width: ExpressionWidth,
 ) -> Result {
     let functions = contract
         .functions
@@ -279,5 +278,5 @@ fn count_opcodes_and_gates_in_contract(
         })
         .collect::>()?;
 
-    Ok(ContractInfo { name: contract.name, language, functions })
+    Ok(ContractInfo { name: contract.name, expression_width, functions })
 }
diff --git a/tooling/nargo_cli/src/cli/init_cmd.rs b/tooling/nargo_cli/src/cli/init_cmd.rs
index e53c2e4cdc9..dd3af97ecd6 100644
--- a/tooling/nargo_cli/src/cli/init_cmd.rs
+++ b/tooling/nargo_cli/src/cli/init_cmd.rs
@@ -1,4 +1,3 @@
-use crate::backends::Backend;
 use crate::errors::CliError;
 
 use super::fs::{create_named_dir, write_to_file};
@@ -34,12 +33,7 @@ const BIN_EXAMPLE: &str = include_str!("./noir_template_files/binary.nr");
 const CONTRACT_EXAMPLE: &str = include_str!("./noir_template_files/contract.nr");
 const LIB_EXAMPLE: &str = include_str!("./noir_template_files/library.nr");
 
-pub(crate) fn run(
-    // Backend is currently unused, but we might want to use it to inform the "new" template in the future
-    _backend: &Backend,
-    args: InitCommand,
-    config: NargoConfig,
-) -> Result<(), CliError> {
+pub(crate) fn run(args: InitCommand, config: NargoConfig) -> Result<(), CliError> {
     let package_name = match args.name {
         Some(name) => name,
         None => {
diff --git a/tooling/nargo_cli/src/cli/mod.rs b/tooling/nargo_cli/src/cli/mod.rs
index 88c6b57a98c..448e28fb6a7 100644
--- a/tooling/nargo_cli/src/cli/mod.rs
+++ b/tooling/nargo_cli/src/cli/mod.rs
@@ -100,7 +100,7 @@ pub(crate) fn start_cli() -> eyre::Result<()> {
     match command {
         NargoCommand::New(args) => new_cmd::run(&backend, args, config),
-        NargoCommand::Init(args) => init_cmd::run(&backend, args, config),
+        NargoCommand::Init(args) => init_cmd::run(args, config),
         NargoCommand::Check(args) => check_cmd::run(&backend, args, config),
         NargoCommand::Compile(args) => compile_cmd::run(&backend, args, config),
         NargoCommand::Debug(args) => debug_cmd::run(&backend, args, config),
diff --git a/tooling/nargo_cli/src/cli/prove_cmd.rs b/tooling/nargo_cli/src/cli/prove_cmd.rs
index 54b148ec3a2..cb1751e7cef 100644
--- a/tooling/nargo_cli/src/cli/prove_cmd.rs
+++ b/tooling/nargo_cli/src/cli/prove_cmd.rs
@@ -57,15 +57,10 @@ pub(crate) fn run(
         Some(NOIR_ARTIFACT_VERSION_STRING.to_string()),
     )?;
 
-    let (np_language, opcode_support) = backend.get_backend_info()?;
+    let expression_width = backend.get_backend_info()?;
     for package in &workspace {
-        let program = compile_bin_package(
-            &workspace,
-            package,
-            &args.compile_options,
-            np_language,
-            &opcode_support,
-        )?;
+        let program =
+            compile_bin_package(&workspace, package, &args.compile_options, expression_width)?;
 
         prove_package(
             backend,
diff --git a/tooling/nargo_cli/src/cli/verify_cmd.rs b/tooling/nargo_cli/src/cli/verify_cmd.rs
index 2f8a6efbba4..9659286b5ab 100644
--- a/tooling/nargo_cli/src/cli/verify_cmd.rs
+++ b/tooling/nargo_cli/src/cli/verify_cmd.rs
@@ -48,15 +48,10 @@ pub(crate) fn run(
         Some(NOIR_ARTIFACT_VERSION_STRING.to_string()),
     )?;
 
-    let (np_language, opcode_support) = backend.get_backend_info()?;
+    let expression_width = backend.get_backend_info()?;
     for package in &workspace {
-        let program = compile_bin_package(
-            &workspace,
-            package,
-            &args.compile_options,
-            np_language,
-            &opcode_support,
-        )?;
+        let program =
+            compile_bin_package(&workspace, package, &args.compile_options, expression_width)?;
 
         verify_package(backend, &workspace, package, program, &args.verifier_name)?;
     }
diff --git a/tooling/nargo_toml/src/lib.rs b/tooling/nargo_toml/src/lib.rs
index 56024f8ed42..6c77fe85f2b 100644
--- a/tooling/nargo_toml/src/lib.rs
+++ b/tooling/nargo_toml/src/lib.rs
@@ -270,7 +270,6 @@ struct PackageMetadata {
     // We also state that ACIR and the compiler will upgrade in lockstep.
     // so you will not need to supply an ACIR and compiler version
     compiler_version: Option,
-    backend: Option,
     license: Option,
 }
 
diff --git a/tooling/noir_codegen/package.json b/tooling/noir_codegen/package.json
index 52ad0321b85..3ae31d9834f 100644
--- a/tooling/noir_codegen/package.json
+++ b/tooling/noir_codegen/package.json
@@ -3,7 +3,7 @@
   "collaborators": [
     "The Noir Team "
   ],
-  "version": "0.20.0",
+  "version": "0.22.0",
   "packageManager": "yarn@3.5.1",
   "license": "(MIT OR Apache-2.0)",
   "type": "module",
diff --git a/tooling/noir_js/package.json b/tooling/noir_js/package.json
index e42cbb1d162..d5ea4b4ad5d 100644
--- a/tooling/noir_js/package.json
+++ b/tooling/noir_js/package.json
@@ -3,7 +3,7 @@
   "collaborators": [
     "The Noir Team "
   ],
-  "version": "0.20.0",
+  "version": "0.22.0",
   "packageManager": "yarn@3.5.1",
   "license": "(MIT OR Apache-2.0)",
   "type": "module",
diff --git a/tooling/noir_js_backend_barretenberg/package.json b/tooling/noir_js_backend_barretenberg/package.json
index 623a290410c..c34b8dfc825 100644
--- a/tooling/noir_js_backend_barretenberg/package.json
+++ b/tooling/noir_js_backend_barretenberg/package.json
@@ -3,7 +3,7 @@
   "collaborators": [
     "The Noir Team "
   ],
-  "version": "0.20.0",
+  "version": "0.22.0",
   "packageManager": "yarn@3.5.1",
   "license": "(MIT OR Apache-2.0)",
   "type": "module",
diff --git a/tooling/noir_js_types/package.json b/tooling/noir_js_types/package.json
index b322b9b6207..51856cfe465 100644
--- a/tooling/noir_js_types/package.json
+++ b/tooling/noir_js_types/package.json
@@ -4,7 +4,7 @@
     "The Noir Team "
   ],
   "packageManager": "yarn@3.5.1",
-  "version": "0.20.0",
+  "version": "0.22.0",
   "license": "(MIT OR Apache-2.0)",
   "files": [
     "lib",
diff --git a/tooling/noirc_abi_wasm/package.json b/tooling/noirc_abi_wasm/package.json
index fe801a40d5e..5b2cd344eab 100644
--- a/tooling/noirc_abi_wasm/package.json
+++ b/tooling/noirc_abi_wasm/package.json
@@ -3,7 +3,7 @@
   "collaborators": [
     "The Noir Team "
   ],
-  "version": "0.20.0",
+  "version": "0.22.0",
   "license": "(MIT OR Apache-2.0)",
   "files": [
     "nodejs",
diff --git a/versions.json b/versions.json
deleted file mode 100644
index a1c826264f4..00000000000
--- a/versions.json
+++ /dev/null
@@ -1,4 +0,0 @@
-[
-  "v0.19.4",
-  "v0.17.0"
-]
\ No newline at end of file
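// --- Illustrative sketch (not part of the patch above) ---
// The changes in this diff replace the old `(Language, BackendOpcodeSupport)` pair with a
// single `ExpressionWidth` value that is fetched from the backend and threaded through
// compilation (`backend.get_backend_info_or_default()` -> `compile_bin_package(...)`).
// The types and functions below are simplified stand-ins, not the real `acvm`/`nargo`
// definitions, so treat this as a sketch of the new calling convention rather than the
// actual API.

#[derive(Debug, Clone, Copy)]
enum ExpressionWidth {
    Unbounded,
    Bounded { width: usize },
}

#[derive(Debug)]
struct CompiledProgram {
    acir_opcodes: usize,
}

// Stand-in for `backend.get_backend_info_or_default()`: report the expression width the
// selected backend supports, falling back to a default when none is configured.
fn get_backend_info_or_default(configured_width: Option<ExpressionWidth>) -> ExpressionWidth {
    configured_width.unwrap_or(ExpressionWidth::Bounded { width: 4 })
}

// Stand-in for `compile_bin_package(...)`: the only backend-specific input is now the
// expression width; there is no longer a per-opcode support check.
fn compile_bin_package(expression_width: ExpressionWidth) -> CompiledProgram {
    let acir_opcodes = match expression_width {
        // An unbounded width leaves arithmetic expressions as-is (hypothetical count).
        ExpressionWidth::Unbounded => 10,
        // A bounded width forces wide expressions to be split, so the count grows.
        ExpressionWidth::Bounded { .. } => 12,
    };
    CompiledProgram { acir_opcodes }
}

fn main() {
    let expression_width = get_backend_info_or_default(None);
    let program = compile_bin_package(expression_width);
    println!("compiled with {:?}: {} ACIR opcodes", expression_width, program.acir_opcodes);
}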